xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 0a94608f0f7de9b1135ffea3546afe68eafef57f)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 #include <linux/dmi.h>
76 
77 #include <drm/display/drm_dp_mst_helper.h>
78 #include <drm/display/drm_hdmi_helper.h>
79 #include <drm/drm_atomic.h>
80 #include <drm/drm_atomic_uapi.h>
81 #include <drm/drm_atomic_helper.h>
82 #include <drm/drm_fb_helper.h>
83 #include <drm/drm_fourcc.h>
84 #include <drm/drm_edid.h>
85 #include <drm/drm_vblank.h>
86 #include <drm/drm_audio_component.h>
87 
88 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
89 
90 #include "dcn/dcn_1_0_offset.h"
91 #include "dcn/dcn_1_0_sh_mask.h"
92 #include "soc15_hw_ip.h"
93 #include "soc15_common.h"
94 #include "vega10_ip_offset.h"
97 
98 #include "gc/gc_11_0_0_offset.h"
99 #include "gc/gc_11_0_0_sh_mask.h"
100 
101 #include "modules/inc/mod_freesync.h"
102 #include "modules/power/power_helpers.h"
103 #include "modules/inc/mod_info_packet.h"
104 
105 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
107 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
109 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
111 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
113 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
115 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
117 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
119 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
121 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
123 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
124 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
125 
126 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
127 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
128 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
129 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
130 
131 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
132 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
133 
134 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
135 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
136 
137 /* Number of bytes in PSP header for firmware. */
138 #define PSP_HEADER_BYTES 0x100
139 
140 /* Number of bytes in PSP footer for firmware. */
141 #define PSP_FOOTER_BYTES 0x100
142 
143 /**
144  * DOC: overview
145  *
146  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
147  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
148  * requests into DC requests, and DC responses into DRM responses.
149  *
150  * The root control structure is &struct amdgpu_display_manager.
151  */
152 
153 /* basic init/fini API */
154 static int amdgpu_dm_init(struct amdgpu_device *adev);
155 static void amdgpu_dm_fini(struct amdgpu_device *adev);
156 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
157 
158 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
159 {
160 	switch (link->dpcd_caps.dongle_type) {
161 	case DISPLAY_DONGLE_NONE:
162 		return DRM_MODE_SUBCONNECTOR_Native;
163 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
164 		return DRM_MODE_SUBCONNECTOR_VGA;
165 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
166 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
167 		return DRM_MODE_SUBCONNECTOR_DVID;
168 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
169 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
170 		return DRM_MODE_SUBCONNECTOR_HDMIA;
171 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
172 	default:
173 		return DRM_MODE_SUBCONNECTOR_Unknown;
174 	}
175 }
176 
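/*
 * Keep the connector's DP subconnector property in sync with the dongle
 * type reported by DC for this link (see get_subconnector_type() above).
 */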
177 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
178 {
179 	struct dc_link *link = aconnector->dc_link;
180 	struct drm_connector *connector = &aconnector->base;
181 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
182 
183 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
184 		return;
185 
186 	if (aconnector->dc_sink)
187 		subconnector = get_subconnector_type(link);
188 
189 	drm_object_property_set_value(&connector->base,
190 			connector->dev->mode_config.dp_subconnector_property,
191 			subconnector);
192 }
193 
194 /*
195  * initializes drm_device display related structures, based on the information
196  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
197  * drm_encoder, drm_mode_config
198  *
199  * Returns 0 on success
200  */
201 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
202 /* removes and deallocates the drm structures, created by the above function */
203 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
204 
205 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
206 				struct drm_plane *plane,
207 				unsigned long possible_crtcs,
208 				const struct dc_plane_cap *plane_cap);
209 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
210 			       struct drm_plane *plane,
211 			       uint32_t link_index);
212 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
213 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
214 				    uint32_t link_index,
215 				    struct amdgpu_encoder *amdgpu_encoder);
216 static int amdgpu_dm_encoder_init(struct drm_device *dev,
217 				  struct amdgpu_encoder *aencoder,
218 				  uint32_t link_index);
219 
220 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
221 
222 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
223 
224 static int amdgpu_dm_atomic_check(struct drm_device *dev,
225 				  struct drm_atomic_state *state);
226 
227 static void handle_cursor_update(struct drm_plane *plane,
228 				 struct drm_plane_state *old_plane_state);
229 
230 static const struct drm_format_info *
231 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
232 
233 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
234 static void handle_hpd_rx_irq(void *param);
235 
236 static bool
237 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
238 				 struct drm_crtc_state *new_crtc_state);
239 /*
240  * dm_vblank_get_counter
241  *
242  * @brief
243  * Get counter for number of vertical blanks
244  *
245  * @param
246  * struct amdgpu_device *adev - [in] desired amdgpu device
247  * int crtc - [in] which CRTC to get the counter from
248  *
249  * @return
250  * Counter for vertical blanks
251  */
252 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
253 {
254 	if (crtc >= adev->mode_info.num_crtc)
255 		return 0;
256 	else {
257 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258 
259 		if (acrtc->dm_irq_params.stream == NULL) {
260 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261 				  crtc);
262 			return 0;
263 		}
264 
265 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
266 	}
267 }
268 
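/*
 * Read back the current scanout position for the given CRTC. The vblank
 * range and h/v position are returned packed register-style (low/high
 * 16 bits), as expected by the base driver.
 */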
269 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
270 				  u32 *vbl, u32 *position)
271 {
272 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
273 
274 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
275 		return -EINVAL;
276 	else {
277 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
278 
279 		if (acrtc->dm_irq_params.stream ==  NULL) {
280 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
281 				  crtc);
282 			return 0;
283 		}
284 
285 		/*
286 		 * TODO rework base driver to use values directly.
287 		 * for now parse it back into reg-format
288 		 */
289 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
290 					 &v_blank_start,
291 					 &v_blank_end,
292 					 &h_position,
293 					 &v_position);
294 
295 		*position = v_position | (h_position << 16);
296 		*vbl = v_blank_start | (v_blank_end << 16);
297 	}
298 
299 	return 0;
300 }
301 
302 static bool dm_is_idle(void *handle)
303 {
304 	/* XXX todo */
305 	return true;
306 }
307 
308 static int dm_wait_for_idle(void *handle)
309 {
310 	/* XXX todo */
311 	return 0;
312 }
313 
314 static bool dm_check_soft_reset(void *handle)
315 {
316 	return false;
317 }
318 
319 static int dm_soft_reset(void *handle)
320 {
321 	/* XXX todo */
322 	return 0;
323 }
324 
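/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc.
 * Interrupt sources report the OTG instance rather than the CRTC index.
 */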
325 static struct amdgpu_crtc *
326 get_crtc_by_otg_inst(struct amdgpu_device *adev,
327 		     int otg_inst)
328 {
329 	struct drm_device *dev = adev_to_drm(adev);
330 	struct drm_crtc *crtc;
331 	struct amdgpu_crtc *amdgpu_crtc;
332 
333 	if (WARN_ON(otg_inst == -1))
334 		return adev->mode_info.crtcs[0];
335 
336 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
337 		amdgpu_crtc = to_amdgpu_crtc(crtc);
338 
339 		if (amdgpu_crtc->otg_inst == otg_inst)
340 			return amdgpu_crtc;
341 	}
342 
343 	return NULL;
344 }
345 
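/*
 * VRR (FreeSync) is considered active when the freesync state is either
 * ACTIVE_VARIABLE or ACTIVE_FIXED. The _irq variant reads the copy kept
 * in dm_irq_params, which is what the interrupt handlers use.
 */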
346 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
347 {
348 	return acrtc->dm_irq_params.freesync_config.state ==
349 		       VRR_STATE_ACTIVE_VARIABLE ||
350 	       acrtc->dm_irq_params.freesync_config.state ==
351 		       VRR_STATE_ACTIVE_FIXED;
352 }
353 
354 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
355 {
356 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
357 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
358 }
359 
360 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
361 					      struct dm_crtc_state *new_state)
362 {
363 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
364 		return true;
365 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
366 		return true;
367 	else
368 		return false;
369 }
370 
371 /**
372  * dm_pflip_high_irq() - Handle pageflip interrupt
373  * @interrupt_params: used for determining the CRTC instance
374  *
375  * Handles the pageflip interrupt by notifying all interested parties
376  * that the pageflip has been completed.
377  */
378 static void dm_pflip_high_irq(void *interrupt_params)
379 {
380 	struct amdgpu_crtc *amdgpu_crtc;
381 	struct common_irq_params *irq_params = interrupt_params;
382 	struct amdgpu_device *adev = irq_params->adev;
383 	unsigned long flags;
384 	struct drm_pending_vblank_event *e;
385 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
386 	bool vrr_active;
387 
388 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
389 
390 	/* IRQ could occur when in initial stage */
391 	/* TODO work and BO cleanup */
392 	if (amdgpu_crtc == NULL) {
393 		DC_LOG_PFLIP("CRTC is null, returning.\n");
394 		return;
395 	}
396 
397 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
398 
399 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
400 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
401 						 amdgpu_crtc->pflip_status,
402 						 AMDGPU_FLIP_SUBMITTED,
403 						 amdgpu_crtc->crtc_id,
404 						 amdgpu_crtc);
405 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
406 		return;
407 	}
408 
409 	/* page flip completed. */
410 	e = amdgpu_crtc->event;
411 	amdgpu_crtc->event = NULL;
412 
413 	WARN_ON(!e);
414 
415 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
416 
417 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
418 	if (!vrr_active ||
419 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
420 				      &v_blank_end, &hpos, &vpos) ||
421 	    (vpos < v_blank_start)) {
422 		/* Update to correct count and vblank timestamp if racing with
423 		 * vblank irq. This also updates to the correct vblank timestamp
424 		 * even in VRR mode, as scanout is past the front-porch atm.
425 		 */
426 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
427 
428 		/* Wake up userspace by sending the pageflip event with proper
429 		 * count and timestamp of vblank of flip completion.
430 		 */
431 		if (e) {
432 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
433 
434 			/* Event sent, so done with vblank for this flip */
435 			drm_crtc_vblank_put(&amdgpu_crtc->base);
436 		}
437 	} else if (e) {
438 		/* VRR active and inside front-porch: vblank count and
439 		 * timestamp for pageflip event will only be up to date after
440 		 * drm_crtc_handle_vblank() has been executed from late vblank
441 		 * irq handler after start of back-porch (vline 0). We queue the
442 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
443 		 * updated timestamp and count, once it runs after us.
444 		 *
445 		 * We need to open-code this instead of using the helper
446 		 * drm_crtc_arm_vblank_event(), as that helper would
447 		 * call drm_crtc_accurate_vblank_count(), which we must
448 		 * not call in VRR mode while we are in front-porch!
449 		 */
450 
451 		/* sequence will be replaced by real count during send-out. */
452 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
453 		e->pipe = amdgpu_crtc->crtc_id;
454 
455 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
456 		e = NULL;
457 	}
458 
459 	/* Keep track of vblank of this flip for flip throttling. We use the
460 	 * cooked hw counter, as that one incremented at start of this vblank
461 	 * of pageflip completion, so last_flip_vblank is the forbidden count
462 	 * for queueing new pageflips if vsync + VRR is enabled.
463 	 */
464 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
465 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
466 
467 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
468 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
469 
470 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
471 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
472 		     vrr_active, (int) !e);
473 }
474 
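/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling after the end of front-porch, including BTR processing
 * for pre-DCE12 (pre-AMDGPU_FAMILY_AI) ASICs.
 */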
475 static void dm_vupdate_high_irq(void *interrupt_params)
476 {
477 	struct common_irq_params *irq_params = interrupt_params;
478 	struct amdgpu_device *adev = irq_params->adev;
479 	struct amdgpu_crtc *acrtc;
480 	struct drm_device *drm_dev;
481 	struct drm_vblank_crtc *vblank;
482 	ktime_t frame_duration_ns, previous_timestamp;
483 	unsigned long flags;
484 	int vrr_active;
485 
486 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
487 
488 	if (acrtc) {
489 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
490 		drm_dev = acrtc->base.dev;
491 		vblank = &drm_dev->vblank[acrtc->base.index];
492 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
493 		frame_duration_ns = vblank->time - previous_timestamp;
494 
495 		if (frame_duration_ns > 0) {
496 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
497 						frame_duration_ns,
498 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
499 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
500 		}
501 
502 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
503 			      acrtc->crtc_id,
504 			      vrr_active);
505 
506 		/* Core vblank handling is done here after end of front-porch in
507 		 * vrr mode, as vblank timestamping will give valid results
508 		 * while now done after front-porch. This will also deliver
509 		 * page-flip completion events that have been queued to us
510 		 * if a pageflip happened inside front-porch.
511 		 */
512 		if (vrr_active) {
513 			drm_crtc_handle_vblank(&acrtc->base);
514 
515 			/* BTR processing for pre-DCE12 ASICs */
516 			if (acrtc->dm_irq_params.stream &&
517 			    adev->family < AMDGPU_FAMILY_AI) {
518 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
519 				mod_freesync_handle_v_update(
520 				    adev->dm.freesync_module,
521 				    acrtc->dm_irq_params.stream,
522 				    &acrtc->dm_irq_params.vrr_params);
523 
524 				dc_stream_adjust_vmin_vmax(
525 				    adev->dm.dc,
526 				    acrtc->dm_irq_params.stream,
527 				    &acrtc->dm_irq_params.vrr_params.adjust);
528 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
529 			}
530 		}
531 	}
532 }
533 
534 /**
535  * dm_crtc_high_irq() - Handles CRTC interrupt
536  * @interrupt_params: used for determining the CRTC instance
537  *
538  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
539  * event handler.
540  */
541 static void dm_crtc_high_irq(void *interrupt_params)
542 {
543 	struct common_irq_params *irq_params = interrupt_params;
544 	struct amdgpu_device *adev = irq_params->adev;
545 	struct amdgpu_crtc *acrtc;
546 	unsigned long flags;
547 	int vrr_active;
548 
549 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
550 	if (!acrtc)
551 		return;
552 
553 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
554 
555 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
556 		      vrr_active, acrtc->dm_irq_params.active_planes);
557 
558 	/*
559 	 * Core vblank handling at start of front-porch is only possible
560 	 * in non-vrr mode, as only there vblank timestamping will give
561 	 * valid results while done in front-porch. Otherwise defer it
562 	 * to dm_vupdate_high_irq after end of front-porch.
563 	 */
564 	if (!vrr_active)
565 		drm_crtc_handle_vblank(&acrtc->base);
566 
567 	/*
568 	 * Following stuff must happen at start of vblank, for crc
569 	 * computation and below-the-range btr support in vrr mode.
570 	 */
571 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
572 
573 	/* BTR updates need to happen before VUPDATE on Vega and above. */
574 	if (adev->family < AMDGPU_FAMILY_AI)
575 		return;
576 
577 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
578 
579 	if (acrtc->dm_irq_params.stream &&
580 	    acrtc->dm_irq_params.vrr_params.supported &&
581 	    acrtc->dm_irq_params.freesync_config.state ==
582 		    VRR_STATE_ACTIVE_VARIABLE) {
583 		mod_freesync_handle_v_update(adev->dm.freesync_module,
584 					     acrtc->dm_irq_params.stream,
585 					     &acrtc->dm_irq_params.vrr_params);
586 
587 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
588 					   &acrtc->dm_irq_params.vrr_params.adjust);
589 	}
590 
591 	/*
592 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
593 	 * In that case, pageflip completion interrupts won't fire and pageflip
594 	 * completion events won't get delivered. Prevent this by sending
595 	 * pending pageflip events from here if a flip is still pending.
596 	 *
597 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
598 	 * avoid race conditions between flip programming and completion,
599 	 * which could cause too early flip completion events.
600 	 */
601 	if (adev->family >= AMDGPU_FAMILY_RV &&
602 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
603 	    acrtc->dm_irq_params.active_planes == 0) {
604 		if (acrtc->event) {
605 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
606 			acrtc->event = NULL;
607 			drm_crtc_vblank_put(&acrtc->base);
608 		}
609 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
610 	}
611 
612 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
613 }
614 
615 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
616 /**
617  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
618  * DCN generation ASICs
619  * @interrupt_params: interrupt parameters
620  *
621  * Used to set crc window/read out crc value at vertical line 0 position
622  */
623 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
624 {
625 	struct common_irq_params *irq_params = interrupt_params;
626 	struct amdgpu_device *adev = irq_params->adev;
627 	struct amdgpu_crtc *acrtc;
628 
629 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
630 
631 	if (!acrtc)
632 		return;
633 
634 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
635 }
636 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
637 
638 /**
639  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
640  * @adev: amdgpu_device pointer
641  * @notify: dmub notification structure
642  *
643  * Dmub AUX or SET_CONFIG command completion processing callback
644  * Copies dmub notification to DM which is to be read by the AUX command
645  * issuing thread, and also signals the event to wake up that thread.
646  */
647 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
648 					struct dmub_notification *notify)
649 {
650 	if (adev->dm.dmub_notify)
651 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
652 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
653 		complete(&adev->dm.dmub_aux_transfer_done);
654 }
655 
656 /**
657  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
658  * @adev: amdgpu_device pointer
659  * @notify: dmub notification structure
660  *
661  * Dmub Hpd interrupt processing callback. Gets the display index through the
662  * link index and calls the helper to do the processing.
663  */
664 static void dmub_hpd_callback(struct amdgpu_device *adev,
665 			      struct dmub_notification *notify)
666 {
667 	struct amdgpu_dm_connector *aconnector;
668 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
669 	struct drm_connector *connector;
670 	struct drm_connector_list_iter iter;
671 	struct dc_link *link;
672 	uint8_t link_index = 0;
673 	struct drm_device *dev;
674 
675 	if (adev == NULL)
676 		return;
677 
678 	if (notify == NULL) {
679 		DRM_ERROR("DMUB HPD callback notification was NULL");
680 		return;
681 	}
682 
683 	if (notify->link_index >= adev->dm.dc->link_count) {
684 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
685 		return;
686 	}
687 
688 	link_index = notify->link_index;
689 	link = adev->dm.dc->links[link_index];
690 	dev = adev->dm.ddev;
691 
692 	drm_connector_list_iter_begin(dev, &iter);
693 	drm_for_each_connector_iter(connector, &iter) {
694 		aconnector = to_amdgpu_dm_connector(connector);
695 		if (link && aconnector->dc_link == link) {
696 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
697 			hpd_aconnector = aconnector;
698 			break;
699 		}
700 	}
701 	drm_connector_list_iter_end(&iter);
702 
703 	if (hpd_aconnector) {
704 		if (notify->type == DMUB_NOTIFICATION_HPD)
705 			handle_hpd_irq_helper(hpd_aconnector);
706 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
707 			handle_hpd_rx_irq(hpd_aconnector);
708 	}
709 }
710 
711 /**
712  * register_dmub_notify_callback - Sets callback for DMUB notify
713  * @adev: amdgpu_device pointer
714  * @type: Type of dmub notification
715  * @callback: Dmub interrupt callback function
716  * @dmub_int_thread_offload: offload indicator
717  *
718  * API to register a dmub callback handler for a dmub notification
719  * Also sets indicator whether callback processing is to be offloaded
720  * to the dmub interrupt handling thread.
721  * Return: true if successfully registered, false if the callback is NULL or the notification type is invalid
722  */
723 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
724 					  enum dmub_notification_type type,
725 					  dmub_notify_interrupt_callback_t callback,
726 					  bool dmub_int_thread_offload)
727 {
728 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
729 		adev->dm.dmub_callback[type] = callback;
730 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
731 	} else
732 		return false;
733 
734 	return true;
735 }
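/*
 * Illustrative usage (see amdgpu_dm_init() below): the AUX reply handler is
 * registered without thread offload, while HPD handlers are offloaded, e.g.
 *
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *                                 dmub_aux_setconfig_callback, false);
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                 dmub_hpd_callback, true);
 */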
736 
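/*
 * Deferred work handler for DMUB notifications flagged for thread offload:
 * invokes the registered callback outside interrupt context and frees the
 * notification copy queued by dm_dmub_outbox1_low_irq().
 */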
737 static void dm_handle_hpd_work(struct work_struct *work)
738 {
739 	struct dmub_hpd_work *dmub_hpd_wrk;
740 
741 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
742 
743 	if (!dmub_hpd_wrk->dmub_notify) {
744 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
745 		return;
746 	}
747 
748 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
749 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
750 		dmub_hpd_wrk->dmub_notify);
751 	}
752 
753 	kfree(dmub_hpd_wrk->dmub_notify);
754 	kfree(dmub_hpd_wrk);
755 
756 }
757 
758 #define DMUB_TRACE_MAX_READ 64
759 /**
760  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
761  * @interrupt_params: used for determining the Outbox instance
762  *
763  * Handles the DMUB Outbox interrupt: dispatches pending DMUB notifications
764  * and drains the DMUB trace buffer.
765  */
766 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
767 {
768 	struct dmub_notification notify;
769 	struct common_irq_params *irq_params = interrupt_params;
770 	struct amdgpu_device *adev = irq_params->adev;
771 	struct amdgpu_display_manager *dm = &adev->dm;
772 	struct dmcub_trace_buf_entry entry = { 0 };
773 	uint32_t count = 0;
774 	struct dmub_hpd_work *dmub_hpd_wrk;
775 	struct dc_link *plink = NULL;
776 
777 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
778 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
779 
780 		do {
781 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
782 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
783 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
784 				continue;
785 			}
786 			if (!dm->dmub_callback[notify.type]) {
787 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
788 				continue;
789 			}
790 			if (dm->dmub_thread_offload[notify.type] == true) {
791 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
792 				if (!dmub_hpd_wrk) {
793 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
794 					return;
795 				}
796 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
797 				if (!dmub_hpd_wrk->dmub_notify) {
798 					kfree(dmub_hpd_wrk);
799 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
800 					return;
801 				}
802 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
803 				if (dmub_hpd_wrk->dmub_notify)
804 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
805 				dmub_hpd_wrk->adev = adev;
806 				if (notify.type == DMUB_NOTIFICATION_HPD) {
807 					plink = adev->dm.dc->links[notify.link_index];
808 					if (plink) {
809 						plink->hpd_status =
810 							notify.hpd_status == DP_HPD_PLUG;
811 					}
812 				}
813 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
814 			} else {
815 				dm->dmub_callback[notify.type](adev, &notify);
816 			}
817 		} while (notify.pending_notification);
818 	}
819 
820 
821 	do {
822 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
823 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
824 							entry.param0, entry.param1);
825 
826 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
827 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
828 		} else
829 			break;
830 
831 		count++;
832 
833 	} while (count <= DMUB_TRACE_MAX_READ);
834 
835 	if (count > DMUB_TRACE_MAX_READ)
836 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
837 }
838 
839 static int dm_set_clockgating_state(void *handle,
840 		  enum amd_clockgating_state state)
841 {
842 	return 0;
843 }
844 
845 static int dm_set_powergating_state(void *handle,
846 		  enum amd_powergating_state state)
847 {
848 	return 0;
849 }
850 
851 /* Prototypes of private functions */
852 static int dm_early_init(void *handle);
853 
854 /* Allocate memory for FBC compressed data  */
855 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
856 {
857 	struct drm_device *dev = connector->dev;
858 	struct amdgpu_device *adev = drm_to_adev(dev);
859 	struct dm_compressor_info *compressor = &adev->dm.compressor;
860 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
861 	struct drm_display_mode *mode;
862 	unsigned long max_size = 0;
863 
864 	if (adev->dm.dc->fbc_compressor == NULL)
865 		return;
866 
867 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
868 		return;
869 
870 	if (compressor->bo_ptr)
871 		return;
872 
873 
874 	list_for_each_entry(mode, &connector->modes, head) {
875 		if (max_size < mode->htotal * mode->vtotal)
876 			max_size = mode->htotal * mode->vtotal;
877 	}
878 
879 	if (max_size) {
880 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
881 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
882 			    &compressor->gpu_addr, &compressor->cpu_addr);
883 
884 		if (r)
885 			DRM_ERROR("DM: Failed to initialize FBC\n");
886 		else {
887 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
888 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
889 		}
890 
891 	}
892 
893 }
894 
895 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
896 					  int pipe, bool *enabled,
897 					  unsigned char *buf, int max_bytes)
898 {
899 	struct drm_device *dev = dev_get_drvdata(kdev);
900 	struct amdgpu_device *adev = drm_to_adev(dev);
901 	struct drm_connector *connector;
902 	struct drm_connector_list_iter conn_iter;
903 	struct amdgpu_dm_connector *aconnector;
904 	int ret = 0;
905 
906 	*enabled = false;
907 
908 	mutex_lock(&adev->dm.audio_lock);
909 
910 	drm_connector_list_iter_begin(dev, &conn_iter);
911 	drm_for_each_connector_iter(connector, &conn_iter) {
912 		aconnector = to_amdgpu_dm_connector(connector);
913 		if (aconnector->audio_inst != port)
914 			continue;
915 
916 		*enabled = true;
917 		ret = drm_eld_size(connector->eld);
918 		memcpy(buf, connector->eld, min(max_bytes, ret));
919 
920 		break;
921 	}
922 	drm_connector_list_iter_end(&conn_iter);
923 
924 	mutex_unlock(&adev->dm.audio_lock);
925 
926 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
927 
928 	return ret;
929 }
930 
931 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
932 	.get_eld = amdgpu_dm_audio_component_get_eld,
933 };
934 
935 static int amdgpu_dm_audio_component_bind(struct device *kdev,
936 				       struct device *hda_kdev, void *data)
937 {
938 	struct drm_device *dev = dev_get_drvdata(kdev);
939 	struct amdgpu_device *adev = drm_to_adev(dev);
940 	struct drm_audio_component *acomp = data;
941 
942 	acomp->ops = &amdgpu_dm_audio_component_ops;
943 	acomp->dev = kdev;
944 	adev->dm.audio_component = acomp;
945 
946 	return 0;
947 }
948 
949 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
950 					  struct device *hda_kdev, void *data)
951 {
952 	struct drm_device *dev = dev_get_drvdata(kdev);
953 	struct amdgpu_device *adev = drm_to_adev(dev);
954 	struct drm_audio_component *acomp = data;
955 
956 	acomp->ops = NULL;
957 	acomp->dev = NULL;
958 	adev->dm.audio_component = NULL;
959 }
960 
961 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
962 	.bind	= amdgpu_dm_audio_component_bind,
963 	.unbind	= amdgpu_dm_audio_component_unbind,
964 };
965 
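/*
 * Register the DRM audio component so the HDA driver can query ELD data
 * (via amdgpu_dm_audio_component_get_eld) for each audio pin exposed by DC.
 */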
966 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
967 {
968 	int i, ret;
969 
970 	if (!amdgpu_audio)
971 		return 0;
972 
973 	adev->mode_info.audio.enabled = true;
974 
975 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
976 
977 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
978 		adev->mode_info.audio.pin[i].channels = -1;
979 		adev->mode_info.audio.pin[i].rate = -1;
980 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
981 		adev->mode_info.audio.pin[i].status_bits = 0;
982 		adev->mode_info.audio.pin[i].category_code = 0;
983 		adev->mode_info.audio.pin[i].connected = false;
984 		adev->mode_info.audio.pin[i].id =
985 			adev->dm.dc->res_pool->audios[i]->inst;
986 		adev->mode_info.audio.pin[i].offset = 0;
987 	}
988 
989 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
990 	if (ret < 0)
991 		return ret;
992 
993 	adev->dm.audio_registered = true;
994 
995 	return 0;
996 }
997 
998 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
999 {
1000 	if (!amdgpu_audio)
1001 		return;
1002 
1003 	if (!adev->mode_info.audio.enabled)
1004 		return;
1005 
1006 	if (adev->dm.audio_registered) {
1007 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1008 		adev->dm.audio_registered = false;
1009 	}
1010 
1011 	/* TODO: Disable audio? */
1012 
1013 	adev->mode_info.audio.enabled = false;
1014 }
1015 
1016 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1017 {
1018 	struct drm_audio_component *acomp = adev->dm.audio_component;
1019 
1020 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1021 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1022 
1023 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1024 						 pin, -1);
1025 	}
1026 }
1027 
1028 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1029 {
1030 	const struct dmcub_firmware_header_v1_0 *hdr;
1031 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1032 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1033 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1034 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1035 	struct abm *abm = adev->dm.dc->res_pool->abm;
1036 	struct dmub_srv_hw_params hw_params;
1037 	enum dmub_status status;
1038 	const unsigned char *fw_inst_const, *fw_bss_data;
1039 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1040 	bool has_hw_support;
1041 
1042 	if (!dmub_srv)
1043 		/* DMUB isn't supported on the ASIC. */
1044 		return 0;
1045 
1046 	if (!fb_info) {
1047 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1048 		return -EINVAL;
1049 	}
1050 
1051 	if (!dmub_fw) {
1052 		/* Firmware required for DMUB support. */
1053 		DRM_ERROR("No firmware provided for DMUB.\n");
1054 		return -EINVAL;
1055 	}
1056 
1057 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1058 	if (status != DMUB_STATUS_OK) {
1059 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1060 		return -EINVAL;
1061 	}
1062 
1063 	if (!has_hw_support) {
1064 		DRM_INFO("DMUB unsupported on ASIC\n");
1065 		return 0;
1066 	}
1067 
1068 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1069 	status = dmub_srv_hw_reset(dmub_srv);
1070 	if (status != DMUB_STATUS_OK)
1071 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1072 
1073 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1074 
1075 	fw_inst_const = dmub_fw->data +
1076 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1077 			PSP_HEADER_BYTES;
1078 
1079 	fw_bss_data = dmub_fw->data +
1080 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1081 		      le32_to_cpu(hdr->inst_const_bytes);
1082 
1083 	/* Copy firmware and bios info into FB memory. */
1084 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1085 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1086 
1087 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1088 
1089 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1090 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1091 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1092 	 * will be done by dm_dmub_hw_init
1093 	 */
1094 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1095 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1096 				fw_inst_const_size);
1097 	}
1098 
1099 	if (fw_bss_data_size)
1100 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1101 		       fw_bss_data, fw_bss_data_size);
1102 
1103 	/* Copy firmware bios info into FB memory. */
1104 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1105 	       adev->bios_size);
1106 
1107 	/* Reset regions that need to be reset. */
1108 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1109 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1110 
1111 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1112 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1113 
1114 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1115 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1116 
1117 	/* Initialize hardware. */
1118 	memset(&hw_params, 0, sizeof(hw_params));
1119 	hw_params.fb_base = adev->gmc.fb_start;
1120 	hw_params.fb_offset = adev->gmc.aper_base;
1121 
1122 	/* backdoor load firmware and trigger dmub running */
1123 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1124 		hw_params.load_inst_const = true;
1125 
1126 	if (dmcu)
1127 		hw_params.psp_version = dmcu->psp_version;
1128 
1129 	for (i = 0; i < fb_info->num_fb; ++i)
1130 		hw_params.fb[i] = &fb_info->fb[i];
1131 
1132 	switch (adev->ip_versions[DCE_HWIP][0]) {
1133 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1134 		hw_params.dpia_supported = true;
1135 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1136 		break;
1137 	default:
1138 		break;
1139 	}
1140 
1141 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1142 	if (status != DMUB_STATUS_OK) {
1143 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1144 		return -EINVAL;
1145 	}
1146 
1147 	/* Wait for firmware load to finish. */
1148 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1149 	if (status != DMUB_STATUS_OK)
1150 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1151 
1152 	/* Init DMCU and ABM if available. */
1153 	if (dmcu && abm) {
1154 		dmcu->funcs->dmcu_init(dmcu);
1155 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1156 	}
1157 
1158 	if (!adev->dm.dc->ctx->dmub_srv)
1159 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1160 	if (!adev->dm.dc->ctx->dmub_srv) {
1161 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1162 		return -ENOMEM;
1163 	}
1164 
1165 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1166 		 adev->dm.dmcub_fw_version);
1167 
1168 	return 0;
1169 }
1170 
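/*
 * Bring DMUB back up after suspend/reset: if the hardware is still marked
 * as initialized, only wait for firmware auto-load to finish; otherwise
 * redo the full dm_dmub_hw_init() sequence.
 */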
1171 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1172 {
1173 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1174 	enum dmub_status status;
1175 	bool init;
1176 
1177 	if (!dmub_srv) {
1178 		/* DMUB isn't supported on the ASIC. */
1179 		return;
1180 	}
1181 
1182 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1183 	if (status != DMUB_STATUS_OK)
1184 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1185 
1186 	if (status == DMUB_STATUS_OK && init) {
1187 		/* Wait for firmware load to finish. */
1188 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1189 		if (status != DMUB_STATUS_OK)
1190 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1191 	} else {
1192 		/* Perform the full hardware initialization. */
1193 		dm_dmub_hw_init(adev);
1194 	}
1195 }
1196 
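/*
 * Build the DC physical address space configuration (system aperture, AGP
 * window and GART page table location) from the GMC settings, for later
 * consumption by dc_setup_system_context().
 */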
1197 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1198 {
1199 	uint64_t pt_base;
1200 	uint32_t logical_addr_low;
1201 	uint32_t logical_addr_high;
1202 	uint32_t agp_base, agp_bot, agp_top;
1203 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1204 
1205 	memset(pa_config, 0, sizeof(*pa_config));
1206 
1207 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1208 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1209 
1210 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1211 		/*
1212 		 * Raven2 has a HW issue that it is unable to use the vram which
1213 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
1214 		 * increase the system aperture high address (by 1) to get rid
1215 		 * of the VM fault and hardware hang.
1216 		 */
1217 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1218 	else
1219 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1220 
1221 	agp_base = 0;
1222 	agp_bot = adev->gmc.agp_start >> 24;
1223 	agp_top = adev->gmc.agp_end >> 24;
1224 
1225 
1226 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1227 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1228 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1229 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1230 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1231 	page_table_base.low_part = lower_32_bits(pt_base);
1232 
1233 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1234 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1235 
1236 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1237 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1238 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1239 
1240 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1241 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1242 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1243 
1244 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1245 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1246 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1247 
1248 	pa_config->is_hvm_enabled = 0;
1249 
1250 }
1251 
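/*
 * Deferred vblank enable/disable handling: keeps a count of CRTCs with
 * vblank interrupts enabled, allows idle optimizations (MALL) only when
 * that count drops to zero, and gates PSR entry/exit accordingly.
 */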
1252 static void vblank_control_worker(struct work_struct *work)
1253 {
1254 	struct vblank_control_work *vblank_work =
1255 		container_of(work, struct vblank_control_work, work);
1256 	struct amdgpu_display_manager *dm = vblank_work->dm;
1257 
1258 	mutex_lock(&dm->dc_lock);
1259 
1260 	if (vblank_work->enable)
1261 		dm->active_vblank_irq_count++;
1262 	else if (dm->active_vblank_irq_count)
1263 		dm->active_vblank_irq_count--;
1264 
1265 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1266 
1267 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1268 
1269 	/*
1270 	 * Control PSR based on vblank requirements from OS
1271 	 *
1272 	 * If panel supports PSR SU, there's no need to disable PSR when OS is
1273 	 * submitting fast atomic commits (we infer this by whether the OS
1274 	 * requests vblank events). Fast atomic commits will simply trigger a
1275 	 * full-frame-update (FFU); a specific case of selective-update (SU)
1276 	 * where the SU region is the full hactive*vactive region. See
1277 	 * fill_dc_dirty_rects().
1278 	 */
1279 	if (vblank_work->stream && vblank_work->stream->link) {
1280 		if (vblank_work->enable) {
1281 			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
1282 			    vblank_work->stream->link->psr_settings.psr_allow_active)
1283 				amdgpu_dm_psr_disable(vblank_work->stream);
1284 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1285 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1286 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1287 			amdgpu_dm_psr_enable(vblank_work->stream);
1288 		}
1289 	}
1290 
1291 	mutex_unlock(&dm->dc_lock);
1292 
1293 	dc_stream_release(vblank_work->stream);
1294 
1295 	kfree(vblank_work);
1296 }
1297 
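/*
 * Offloaded HPD RX IRQ handling: re-detects the sink and, outside interrupt
 * context, services DP automated test requests or link-loss recovery for
 * the affected link.
 */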
1298 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1299 {
1300 	struct hpd_rx_irq_offload_work *offload_work;
1301 	struct amdgpu_dm_connector *aconnector;
1302 	struct dc_link *dc_link;
1303 	struct amdgpu_device *adev;
1304 	enum dc_connection_type new_connection_type = dc_connection_none;
1305 	unsigned long flags;
1306 
1307 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1308 	aconnector = offload_work->offload_wq->aconnector;
1309 
1310 	if (!aconnector) {
1311 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1312 		goto skip;
1313 	}
1314 
1315 	adev = drm_to_adev(aconnector->base.dev);
1316 	dc_link = aconnector->dc_link;
1317 
1318 	mutex_lock(&aconnector->hpd_lock);
1319 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1320 		DRM_ERROR("KMS: Failed to detect connector\n");
1321 	mutex_unlock(&aconnector->hpd_lock);
1322 
1323 	if (new_connection_type == dc_connection_none)
1324 		goto skip;
1325 
1326 	if (amdgpu_in_reset(adev))
1327 		goto skip;
1328 
1329 	mutex_lock(&adev->dm.dc_lock);
1330 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1331 		dc_link_dp_handle_automated_test(dc_link);
1332 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1333 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1334 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1335 		dc_link_dp_handle_link_loss(dc_link);
1336 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1337 		offload_work->offload_wq->is_handling_link_loss = false;
1338 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1339 	}
1340 	mutex_unlock(&adev->dm.dc_lock);
1341 
1342 skip:
1343 	kfree(offload_work);
1344 
1345 }
1346 
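/*
 * Allocate one single-threaded offload workqueue per link so that HPD RX
 * IRQ servicing (dm_handle_hpd_rx_offload_work) can run outside interrupt
 * context.
 */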
1347 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1348 {
1349 	int max_caps = dc->caps.max_links;
1350 	int i = 0;
1351 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1352 
1353 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1354 
1355 	if (!hpd_rx_offload_wq)
1356 		return NULL;
1357 
1358 
1359 	for (i = 0; i < max_caps; i++) {
1360 		hpd_rx_offload_wq[i].wq =
1361 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1362 
1363 		if (hpd_rx_offload_wq[i].wq == NULL) {
1364 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1365 			return NULL;
1366 		}
1367 
1368 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1369 	}
1370 
1371 	return hpd_rx_offload_wq;
1372 }
1373 
1374 struct amdgpu_stutter_quirk {
1375 	u16 chip_vendor;
1376 	u16 chip_device;
1377 	u16 subsys_vendor;
1378 	u16 subsys_device;
1379 	u8 revision;
1380 };
1381 
1382 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1383 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1384 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1385 	{ 0, 0, 0, 0, 0 },
1386 };
1387 
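/*
 * Returns true when the PCI IDs and revision match an entry in
 * amdgpu_stutter_quirk_list, i.e. a platform known to need stutter disabled.
 */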
1388 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1389 {
1390 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1391 
1392 	while (p && p->chip_device != 0) {
1393 		if (pdev->vendor == p->chip_vendor &&
1394 		    pdev->device == p->chip_device &&
1395 		    pdev->subsystem_vendor == p->subsys_vendor &&
1396 		    pdev->subsystem_device == p->subsys_device &&
1397 		    pdev->revision == p->revision) {
1398 			return true;
1399 		}
1400 		++p;
1401 	}
1402 	return false;
1403 }
1404 
1405 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1406 	{
1407 		.matches = {
1408 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1409 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1410 		},
1411 	},
1412 	{
1413 		.matches = {
1414 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1415 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1416 		},
1417 	},
1418 	{
1419 		.matches = {
1420 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1421 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1422 		},
1423 	},
1424 	{}
1425 };
1426 
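/*
 * Apply DMI-based quirks: currently only the AUX HPD disconnect quirk for
 * the systems listed in hpd_disconnect_quirk_table.
 */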
1427 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1428 {
1429 	const struct dmi_system_id *dmi_id;
1430 
1431 	dm->aux_hpd_discon_quirk = false;
1432 
1433 	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1434 	if (dmi_id) {
1435 		dm->aux_hpd_discon_quirk = true;
1436 		DRM_INFO("aux_hpd_discon_quirk attached\n");
1437 	}
1438 }
1439 
1440 static int amdgpu_dm_init(struct amdgpu_device *adev)
1441 {
1442 	struct dc_init_data init_data;
1443 #ifdef CONFIG_DRM_AMD_DC_HDCP
1444 	struct dc_callback_init init_params;
1445 #endif
1446 	int r;
1447 
1448 	adev->dm.ddev = adev_to_drm(adev);
1449 	adev->dm.adev = adev;
1450 
1451 	/* Zero all the fields */
1452 	memset(&init_data, 0, sizeof(init_data));
1453 #ifdef CONFIG_DRM_AMD_DC_HDCP
1454 	memset(&init_params, 0, sizeof(init_params));
1455 #endif
1456 
1457 	mutex_init(&adev->dm.dc_lock);
1458 	mutex_init(&adev->dm.audio_lock);
1459 	spin_lock_init(&adev->dm.vblank_lock);
1460 
1461 	if (amdgpu_dm_irq_init(adev)) {
1462 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1463 		goto error;
1464 	}
1465 
1466 	init_data.asic_id.chip_family = adev->family;
1467 
1468 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1469 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1470 	init_data.asic_id.chip_id = adev->pdev->device;
1471 
1472 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1473 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1474 	init_data.asic_id.atombios_base_address =
1475 		adev->mode_info.atom_context->bios;
1476 
1477 	init_data.driver = adev;
1478 
1479 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1480 
1481 	if (!adev->dm.cgs_device) {
1482 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1483 		goto error;
1484 	}
1485 
1486 	init_data.cgs_device = adev->dm.cgs_device;
1487 
1488 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1489 
1490 	switch (adev->ip_versions[DCE_HWIP][0]) {
1491 	case IP_VERSION(2, 1, 0):
1492 		switch (adev->dm.dmcub_fw_version) {
1493 		case 0: /* development */
1494 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1495 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1496 			init_data.flags.disable_dmcu = false;
1497 			break;
1498 		default:
1499 			init_data.flags.disable_dmcu = true;
1500 		}
1501 		break;
1502 	case IP_VERSION(2, 0, 3):
1503 		init_data.flags.disable_dmcu = true;
1504 		break;
1505 	default:
1506 		break;
1507 	}
1508 
1509 	switch (adev->asic_type) {
1510 	case CHIP_CARRIZO:
1511 	case CHIP_STONEY:
1512 		init_data.flags.gpu_vm_support = true;
1513 		break;
1514 	default:
1515 		switch (adev->ip_versions[DCE_HWIP][0]) {
1516 		case IP_VERSION(1, 0, 0):
1517 		case IP_VERSION(1, 0, 1):
1518 			/* enable S/G on PCO and RV2 */
1519 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1520 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1521 				init_data.flags.gpu_vm_support = true;
1522 			break;
1523 		case IP_VERSION(2, 1, 0):
1524 		case IP_VERSION(3, 0, 1):
1525 		case IP_VERSION(3, 1, 2):
1526 		case IP_VERSION(3, 1, 3):
1527 		case IP_VERSION(3, 1, 5):
1528 		case IP_VERSION(3, 1, 6):
1529 			init_data.flags.gpu_vm_support = true;
1530 			break;
1531 		default:
1532 			break;
1533 		}
1534 		break;
1535 	}
1536 
1537 	if (init_data.flags.gpu_vm_support)
1538 		adev->mode_info.gpu_vm_support = true;
1539 
1540 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1541 		init_data.flags.fbc_support = true;
1542 
1543 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1544 		init_data.flags.multi_mon_pp_mclk_switch = true;
1545 
1546 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1547 		init_data.flags.disable_fractional_pwm = true;
1548 
1549 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1550 		init_data.flags.edp_no_power_sequencing = true;
1551 
1552 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1553 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1554 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1555 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1556 
1557 	init_data.flags.seamless_boot_edp_requested = false;
1558 
1559 	if (check_seamless_boot_capability(adev)) {
1560 		init_data.flags.seamless_boot_edp_requested = true;
1561 		init_data.flags.allow_seamless_boot_optimization = true;
1562 		DRM_INFO("Seamless boot condition check passed\n");
1563 	}
1564 
1565 	init_data.flags.enable_mipi_converter_optimization = true;
1566 
1567 	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1568 	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1569 
1570 	INIT_LIST_HEAD(&adev->dm.da_list);
1571 
1572 	retrieve_dmi_info(&adev->dm);
1573 
1574 	/* Display Core create. */
1575 	adev->dm.dc = dc_create(&init_data);
1576 
1577 	if (adev->dm.dc) {
1578 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1579 	} else {
1580 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1581 		goto error;
1582 	}
1583 
1584 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1585 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1586 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1587 	}
1588 
1589 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1590 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1591 	if (dm_should_disable_stutter(adev->pdev))
1592 		adev->dm.dc->debug.disable_stutter = true;
1593 
1594 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1595 		adev->dm.dc->debug.disable_stutter = true;
1596 
1597 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1598 		adev->dm.dc->debug.disable_dsc = true;
1599 		adev->dm.dc->debug.disable_dsc_edp = true;
1600 	}
1601 
1602 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1603 		adev->dm.dc->debug.disable_clock_gate = true;
1604 
1605 	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1606 		adev->dm.dc->debug.force_subvp_mclk_switch = true;
1607 
1608 	r = dm_dmub_hw_init(adev);
1609 	if (r) {
1610 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1611 		goto error;
1612 	}
1613 
1614 	dc_hardware_init(adev->dm.dc);
1615 
1616 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1617 	if (!adev->dm.hpd_rx_offload_wq) {
1618 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1619 		goto error;
1620 	}
1621 
1622 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1623 		struct dc_phy_addr_space_config pa_config;
1624 
1625 		mmhub_read_system_context(adev, &pa_config);
1626 
1627 		// Call the DC init_memory func
1628 		dc_setup_system_context(adev->dm.dc, &pa_config);
1629 	}
1630 
1631 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1632 	if (!adev->dm.freesync_module) {
1633 		DRM_ERROR(
1634 		"amdgpu: failed to initialize freesync_module.\n");
1635 	} else
1636 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1637 				adev->dm.freesync_module);
1638 
1639 	amdgpu_dm_init_color_mod();
1640 
1641 	if (adev->dm.dc->caps.max_links > 0) {
1642 		adev->dm.vblank_control_workqueue =
1643 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1644 		if (!adev->dm.vblank_control_workqueue)
1645 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1646 	}
1647 
1648 #ifdef CONFIG_DRM_AMD_DC_HDCP
1649 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1650 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1651 
1652 		if (!adev->dm.hdcp_workqueue)
1653 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1654 		else
1655 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1656 
1657 		dc_init_callbacks(adev->dm.dc, &init_params);
1658 	}
1659 #endif
1660 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1661 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1662 #endif
1663 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1664 		init_completion(&adev->dm.dmub_aux_transfer_done);
1665 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1666 		if (!adev->dm.dmub_notify) {
1667 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1668 			goto error;
1669 		}
1670 
1671 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1672 		if (!adev->dm.delayed_hpd_wq) {
1673 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1674 			goto error;
1675 		}
1676 
1677 		amdgpu_dm_outbox_init(adev);
1678 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1679 			dmub_aux_setconfig_callback, false)) {
1680 			DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1681 			goto error;
1682 		}
1683 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1684 			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1685 			goto error;
1686 		}
1687 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1688 			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1689 			goto error;
1690 		}
1691 	}
1692 
1693 	if (amdgpu_dm_initialize_drm_device(adev)) {
1694 		DRM_ERROR(
1695 		"amdgpu: failed to initialize sw for display support.\n");
1696 		goto error;
1697 	}
1698 
1699 	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1700 	 * It is expected that DMUB will resend any pending notifications at this point, for
1701 	 * example HPD from DPIA.
1702 	 */
1703 	if (dc_is_dmub_outbox_supported(adev->dm.dc))
1704 		dc_enable_dmub_outbox(adev->dm.dc);
1705 
1706 	/* create fake encoders for MST */
1707 	dm_dp_create_fake_mst_encoders(adev);
1708 
1709 	/* TODO: Add_display_info? */
1710 
1711 	/* TODO use dynamic cursor width */
1712 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1713 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1714 
1715 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1716 		DRM_ERROR(
1717 		"amdgpu: failed to initialize vblank for display support.\n");
1718 		goto error;
1719 	}
1720 
1721 
1722 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1723 
1724 	return 0;
1725 error:
1726 	amdgpu_dm_fini(adev);
1727 
1728 	return -EINVAL;
1729 }
1730 
1731 static int amdgpu_dm_early_fini(void *handle)
1732 {
1733 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1734 
1735 	amdgpu_dm_audio_fini(adev);
1736 
1737 	return 0;
1738 }
1739 
1740 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1741 {
1742 	int i;
1743 
1744 	if (adev->dm.vblank_control_workqueue) {
1745 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1746 		adev->dm.vblank_control_workqueue = NULL;
1747 	}
1748 
1749 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1750 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1751 	}
1752 
1753 	amdgpu_dm_destroy_drm_device(&adev->dm);
1754 
1755 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1756 	if (adev->dm.crc_rd_wrk) {
1757 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1758 		kfree(adev->dm.crc_rd_wrk);
1759 		adev->dm.crc_rd_wrk = NULL;
1760 	}
1761 #endif
1762 #ifdef CONFIG_DRM_AMD_DC_HDCP
1763 	if (adev->dm.hdcp_workqueue) {
1764 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1765 		adev->dm.hdcp_workqueue = NULL;
1766 	}
1767 
1768 	if (adev->dm.dc)
1769 		dc_deinit_callbacks(adev->dm.dc);
1770 #endif
1771 
1772 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1773 
1774 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1775 		kfree(adev->dm.dmub_notify);
1776 		adev->dm.dmub_notify = NULL;
1777 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1778 		adev->dm.delayed_hpd_wq = NULL;
1779 	}
1780 
1781 	if (adev->dm.dmub_bo)
1782 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1783 				      &adev->dm.dmub_bo_gpu_addr,
1784 				      &adev->dm.dmub_bo_cpu_addr);
1785 
1786 	if (adev->dm.hpd_rx_offload_wq) {
1787 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1788 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1789 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1790 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1791 			}
1792 		}
1793 
1794 		kfree(adev->dm.hpd_rx_offload_wq);
1795 		adev->dm.hpd_rx_offload_wq = NULL;
1796 	}
1797 
1798 	/* DC Destroy TODO: Replace destroy DAL */
1799 	if (adev->dm.dc)
1800 		dc_destroy(&adev->dm.dc);
1801 	/*
1802 	 * TODO: pageflip, vblank interrupt
1803 	 *
1804 	 * amdgpu_dm_irq_fini(adev);
1805 	 */
1806 
1807 	if (adev->dm.cgs_device) {
1808 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1809 		adev->dm.cgs_device = NULL;
1810 	}
1811 	if (adev->dm.freesync_module) {
1812 		mod_freesync_destroy(adev->dm.freesync_module);
1813 		adev->dm.freesync_module = NULL;
1814 	}
1815 
1816 	mutex_destroy(&adev->dm.audio_lock);
1817 	mutex_destroy(&adev->dm.dc_lock);
1818 
1819 	return;
1820 }
1821 
1822 static int load_dmcu_fw(struct amdgpu_device *adev)
1823 {
1824 	const char *fw_name_dmcu = NULL;
1825 	int r;
1826 	const struct dmcu_firmware_header_v1_0 *hdr;
1827 
1828 	switch (adev->asic_type) {
1829 #if defined(CONFIG_DRM_AMD_DC_SI)
1830 	case CHIP_TAHITI:
1831 	case CHIP_PITCAIRN:
1832 	case CHIP_VERDE:
1833 	case CHIP_OLAND:
1834 #endif
1835 	case CHIP_BONAIRE:
1836 	case CHIP_HAWAII:
1837 	case CHIP_KAVERI:
1838 	case CHIP_KABINI:
1839 	case CHIP_MULLINS:
1840 	case CHIP_TONGA:
1841 	case CHIP_FIJI:
1842 	case CHIP_CARRIZO:
1843 	case CHIP_STONEY:
1844 	case CHIP_POLARIS11:
1845 	case CHIP_POLARIS10:
1846 	case CHIP_POLARIS12:
1847 	case CHIP_VEGAM:
1848 	case CHIP_VEGA10:
1849 	case CHIP_VEGA12:
1850 	case CHIP_VEGA20:
1851 		return 0;
1852 	case CHIP_NAVI12:
1853 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1854 		break;
1855 	case CHIP_RAVEN:
1856 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1857 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1858 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1859 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1860 		else
1861 			return 0;
1862 		break;
1863 	default:
1864 		switch (adev->ip_versions[DCE_HWIP][0]) {
1865 		case IP_VERSION(2, 0, 2):
1866 		case IP_VERSION(2, 0, 3):
1867 		case IP_VERSION(2, 0, 0):
1868 		case IP_VERSION(2, 1, 0):
1869 		case IP_VERSION(3, 0, 0):
1870 		case IP_VERSION(3, 0, 2):
1871 		case IP_VERSION(3, 0, 3):
1872 		case IP_VERSION(3, 0, 1):
1873 		case IP_VERSION(3, 1, 2):
1874 		case IP_VERSION(3, 1, 3):
1875 		case IP_VERSION(3, 1, 5):
1876 		case IP_VERSION(3, 1, 6):
1877 		case IP_VERSION(3, 2, 0):
1878 		case IP_VERSION(3, 2, 1):
1879 			return 0;
1880 		default:
1881 			break;
1882 		}
1883 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1884 		return -EINVAL;
1885 	}
1886 
1887 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1888 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1889 		return 0;
1890 	}
1891 
1892 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1893 	if (r == -ENOENT) {
1894 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1895 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1896 		adev->dm.fw_dmcu = NULL;
1897 		return 0;
1898 	}
1899 	if (r) {
1900 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1901 			fw_name_dmcu);
1902 		return r;
1903 	}
1904 
1905 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1906 	if (r) {
1907 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1908 			fw_name_dmcu);
1909 		release_firmware(adev->dm.fw_dmcu);
1910 		adev->dm.fw_dmcu = NULL;
1911 		return r;
1912 	}
1913 
1914 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1915 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1916 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1917 	adev->firmware.fw_size +=
1918 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1919 
1920 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1921 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1922 	adev->firmware.fw_size +=
1923 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1924 
1925 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1926 
1927 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1928 
1929 	return 0;
1930 }
1931 
1932 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1933 {
1934 	struct amdgpu_device *adev = ctx;
1935 
1936 	return dm_read_reg(adev->dm.dc->ctx, address);
1937 }
1938 
1939 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1940 				     uint32_t value)
1941 {
1942 	struct amdgpu_device *adev = ctx;
1943 
1944 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1945 }
1946 
1947 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1948 {
1949 	struct dmub_srv_create_params create_params;
1950 	struct dmub_srv_region_params region_params;
1951 	struct dmub_srv_region_info region_info;
1952 	struct dmub_srv_fb_params fb_params;
1953 	struct dmub_srv_fb_info *fb_info;
1954 	struct dmub_srv *dmub_srv;
1955 	const struct dmcub_firmware_header_v1_0 *hdr;
1956 	const char *fw_name_dmub;
1957 	enum dmub_asic dmub_asic;
1958 	enum dmub_status status;
1959 	int r;
1960 
1961 	switch (adev->ip_versions[DCE_HWIP][0]) {
1962 	case IP_VERSION(2, 1, 0):
1963 		dmub_asic = DMUB_ASIC_DCN21;
1964 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1965 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1966 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1967 		break;
1968 	case IP_VERSION(3, 0, 0):
1969 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1970 			dmub_asic = DMUB_ASIC_DCN30;
1971 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1972 		} else {
1973 			dmub_asic = DMUB_ASIC_DCN30;
1974 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1975 		}
1976 		break;
1977 	case IP_VERSION(3, 0, 1):
1978 		dmub_asic = DMUB_ASIC_DCN301;
1979 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1980 		break;
1981 	case IP_VERSION(3, 0, 2):
1982 		dmub_asic = DMUB_ASIC_DCN302;
1983 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1984 		break;
1985 	case IP_VERSION(3, 0, 3):
1986 		dmub_asic = DMUB_ASIC_DCN303;
1987 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1988 		break;
1989 	case IP_VERSION(3, 1, 2):
1990 	case IP_VERSION(3, 1, 3):
1991 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1992 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1993 		break;
1994 	case IP_VERSION(3, 1, 5):
1995 		dmub_asic = DMUB_ASIC_DCN315;
1996 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1997 		break;
1998 	case IP_VERSION(3, 1, 6):
1999 		dmub_asic = DMUB_ASIC_DCN316;
2000 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
2001 		break;
2002 	case IP_VERSION(3, 2, 0):
2003 		dmub_asic = DMUB_ASIC_DCN32;
2004 		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
2005 		break;
2006 	case IP_VERSION(3, 2, 1):
2007 		dmub_asic = DMUB_ASIC_DCN321;
2008 		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
2009 		break;
2010 	default:
2011 		/* ASIC doesn't support DMUB. */
2012 		return 0;
2013 	}
2014 
2015 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2016 	if (r) {
2017 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2018 		return 0;
2019 	}
2020 
2021 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2022 	if (r) {
2023 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2024 		return 0;
2025 	}
2026 
2027 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2028 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2029 
2030 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2031 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2032 			AMDGPU_UCODE_ID_DMCUB;
2033 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2034 			adev->dm.dmub_fw;
2035 		adev->firmware.fw_size +=
2036 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2037 
2038 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2039 			 adev->dm.dmcub_fw_version);
2040 	}
2041 
2042 
2043 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2044 	dmub_srv = adev->dm.dmub_srv;
2045 
2046 	if (!dmub_srv) {
2047 		DRM_ERROR("Failed to allocate DMUB service!\n");
2048 		return -ENOMEM;
2049 	}
2050 
2051 	memset(&create_params, 0, sizeof(create_params));
2052 	create_params.user_ctx = adev;
2053 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2054 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2055 	create_params.asic = dmub_asic;
2056 
2057 	/* Create the DMUB service. */
2058 	status = dmub_srv_create(dmub_srv, &create_params);
2059 	if (status != DMUB_STATUS_OK) {
2060 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2061 		return -EINVAL;
2062 	}
2063 
2064 	/* Calculate the size of all the regions for the DMUB service. */
2065 	memset(&region_params, 0, sizeof(region_params));
2066 
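	/*
	 * The DMCUB firmware image is laid out as a PSP header, the
	 * instruction/constant section and a PSP footer, followed by the
	 * BSS/data section; point the region parameters past the PSP wrapper.
	 */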
2067 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2068 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2069 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2070 	region_params.vbios_size = adev->bios_size;
2071 	region_params.fw_bss_data = region_params.bss_data_size ?
2072 		adev->dm.dmub_fw->data +
2073 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2074 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2075 	region_params.fw_inst_const =
2076 		adev->dm.dmub_fw->data +
2077 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2078 		PSP_HEADER_BYTES;
2079 
2080 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2081 					   &region_info);
2082 
2083 	if (status != DMUB_STATUS_OK) {
2084 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2085 		return -EINVAL;
2086 	}
2087 
2088 	/*
2089 	 * Allocate a framebuffer based on the total size of all the regions.
2090 	 * TODO: Move this into GART.
2091 	 */
2092 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2093 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2094 				    &adev->dm.dmub_bo_gpu_addr,
2095 				    &adev->dm.dmub_bo_cpu_addr);
2096 	if (r)
2097 		return r;
2098 
2099 	/* Rebase the regions on the framebuffer address. */
2100 	memset(&fb_params, 0, sizeof(fb_params));
2101 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2102 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2103 	fb_params.region_info = &region_info;
2104 
2105 	adev->dm.dmub_fb_info =
2106 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2107 	fb_info = adev->dm.dmub_fb_info;
2108 
2109 	if (!fb_info) {
2110 		DRM_ERROR(
2111 			"Failed to allocate framebuffer info for DMUB service!\n");
2112 		return -ENOMEM;
2113 	}
2114 
2115 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2116 	if (status != DMUB_STATUS_OK) {
2117 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2118 		return -EINVAL;
2119 	}
2120 
2121 	return 0;
2122 }
2123 
2124 static int dm_sw_init(void *handle)
2125 {
2126 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2127 	int r;
2128 
2129 	r = dm_dmub_sw_init(adev);
2130 	if (r)
2131 		return r;
2132 
2133 	return load_dmcu_fw(adev);
2134 }
2135 
2136 static int dm_sw_fini(void *handle)
2137 {
2138 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2139 
2140 	kfree(adev->dm.dmub_fb_info);
2141 	adev->dm.dmub_fb_info = NULL;
2142 
2143 	if (adev->dm.dmub_srv) {
2144 		dmub_srv_destroy(adev->dm.dmub_srv);
2145 		adev->dm.dmub_srv = NULL;
2146 	}
2147 
2148 	release_firmware(adev->dm.dmub_fw);
2149 	adev->dm.dmub_fw = NULL;
2150 
2151 	release_firmware(adev->dm.fw_dmcu);
2152 	adev->dm.fw_dmcu = NULL;
2153 
2154 	return 0;
2155 }
2156 
2157 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2158 {
2159 	struct amdgpu_dm_connector *aconnector;
2160 	struct drm_connector *connector;
2161 	struct drm_connector_list_iter iter;
2162 	int ret = 0;
2163 
2164 	drm_connector_list_iter_begin(dev, &iter);
2165 	drm_for_each_connector_iter(connector, &iter) {
2166 		aconnector = to_amdgpu_dm_connector(connector);
2167 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2168 		    aconnector->mst_mgr.aux) {
2169 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2170 					 aconnector,
2171 					 aconnector->base.base.id);
2172 
2173 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2174 			if (ret < 0) {
2175 				DRM_ERROR("DM_MST: Failed to start MST\n");
2176 				aconnector->dc_link->type =
2177 					dc_connection_single;
2178 				break;
2179 			}
2180 		}
2181 	}
2182 	drm_connector_list_iter_end(&iter);
2183 
2184 	return ret;
2185 }
2186 
2187 static int dm_late_init(void *handle)
2188 {
2189 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2190 
2191 	struct dmcu_iram_parameters params;
2192 	unsigned int linear_lut[16];
2193 	int i;
2194 	struct dmcu *dmcu = NULL;
2195 
2196 	dmcu = adev->dm.dc->res_pool->dmcu;
2197 
2198 	for (i = 0; i < 16; i++)
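	/* Build a 16-entry linear backlight LUT spanning 0..0xFFFF. */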
2199 		linear_lut[i] = 0xFFFF * i / 15;
2200 
2201 	params.set = 0;
2202 	params.backlight_ramping_override = false;
2203 	params.backlight_ramping_start = 0xCCCC;
2204 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2205 	params.backlight_lut_array_size = 16;
2206 	params.backlight_lut_array = linear_lut;
2207 
2208 	/* Min backlight level after ABM reduction; don't allow below 1%.
2209 	 * 0xFFFF x 0.01 = 0x28F
2210 	 */
2211 	params.min_abm_backlight = 0x28F;
2212 	/* In the case where abm is implemented on dmcub,
2213 	 * dmcu object will be null.
2214 	 * ABM 2.4 and up are implemented on dmcub.
2215 	 */
2216 	if (dmcu) {
2217 		if (!dmcu_load_iram(dmcu, params))
2218 			return -EINVAL;
2219 	} else if (adev->dm.dc->ctx->dmub_srv) {
2220 		struct dc_link *edp_links[MAX_NUM_EDP];
2221 		int edp_num;
2222 
2223 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2224 		for (i = 0; i < edp_num; i++) {
2225 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2226 				return -EINVAL;
2227 		}
2228 	}
2229 
2230 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2231 }
2232 
2233 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2234 {
2235 	struct amdgpu_dm_connector *aconnector;
2236 	struct drm_connector *connector;
2237 	struct drm_connector_list_iter iter;
2238 	struct drm_dp_mst_topology_mgr *mgr;
2239 	int ret;
2240 	bool need_hotplug = false;
2241 
2242 	drm_connector_list_iter_begin(dev, &iter);
2243 	drm_for_each_connector_iter(connector, &iter) {
2244 		aconnector = to_amdgpu_dm_connector(connector);
2245 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2246 		    aconnector->mst_port)
2247 			continue;
2248 
2249 		mgr = &aconnector->mst_mgr;
2250 
2251 		if (suspend) {
2252 			drm_dp_mst_topology_mgr_suspend(mgr);
2253 		} else {
2254 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2255 			if (ret < 0) {
2256 				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2257 					aconnector->dc_link);
2258 				need_hotplug = true;
2259 			}
2260 		}
2261 	}
2262 	drm_connector_list_iter_end(&iter);
2263 
2264 	if (need_hotplug)
2265 		drm_kms_helper_hotplug_event(dev);
2266 }
2267 
2268 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2269 {
2270 	int ret = 0;
2271 
2272 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2273 	 * on the Windows driver dc implementation.
2274 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2275 	 * should be passed to smu during boot up and resume from s3.
2276 	 * boot up: dc calculates dcn watermark clock settings within dc_create,
2277 	 * dcn20_resource_construct
2278 	 * then calls the pplib functions below to pass the settings to smu:
2279 	 * smu_set_watermarks_for_clock_ranges
2280 	 * smu_set_watermarks_table
2281 	 * navi10_set_watermarks_table
2282 	 * smu_write_watermarks_table
2283 	 *
2284 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
2285 	 * dc has implemented a different flow for the Windows driver:
2286 	 * dc_hardware_init / dc_set_power_state
2287 	 * dcn10_init_hw
2288 	 * notify_wm_ranges
2289 	 * set_wm_ranges
2290 	 * -- Linux
2291 	 * smu_set_watermarks_for_clock_ranges
2292 	 * renoir_set_watermarks_table
2293 	 * smu_write_watermarks_table
2294 	 *
2295 	 * For Linux,
2296 	 * dc_hardware_init -> amdgpu_dm_init
2297 	 * dc_set_power_state --> dm_resume
2298 	 *
2299 	 * Therefore, this function applies to navi10/12/14 but not Renoir.
2300 	 */
2302 	switch (adev->ip_versions[DCE_HWIP][0]) {
2303 	case IP_VERSION(2, 0, 2):
2304 	case IP_VERSION(2, 0, 0):
2305 		break;
2306 	default:
2307 		return 0;
2308 	}
2309 
2310 	ret = amdgpu_dpm_write_watermarks_table(adev);
2311 	if (ret) {
2312 		DRM_ERROR("Failed to update WMTABLE!\n");
2313 		return ret;
2314 	}
2315 
2316 	return 0;
2317 }
2318 
2319 /**
2320  * dm_hw_init() - Initialize DC device
2321  * @handle: The base driver device containing the amdgpu_dm device.
2322  *
2323  * Initialize the &struct amdgpu_display_manager device. This involves calling
2324  * the initializers of each DM component, then populating the struct with them.
2325  *
2326  * Although the function implies hardware initialization, both hardware and
2327  * software are initialized here. Splitting them out to their relevant init
2328  * hooks is a future TODO item.
2329  *
2330  * Some notable things that are initialized here:
2331  *
2332  * - Display Core, both software and hardware
2333  * - DC modules that we need (freesync and color management)
2334  * - DRM software states
2335  * - Interrupt sources and handlers
2336  * - Vblank support
2337  * - Debug FS entries, if enabled
2338  */
2339 static int dm_hw_init(void *handle)
2340 {
2341 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2342 	/* Create DAL display manager */
2343 	amdgpu_dm_init(adev);
2344 	amdgpu_dm_hpd_init(adev);
2345 
2346 	return 0;
2347 }
2348 
2349 /**
2350  * dm_hw_fini() - Teardown DC device
2351  * @handle: The base driver device containing the amdgpu_dm device.
2352  *
2353  * Teardown components within &struct amdgpu_display_manager that require
2354  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2355  * were loaded. Also flush IRQ workqueues and disable them.
2356  */
2357 static int dm_hw_fini(void *handle)
2358 {
2359 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2360 
2361 	amdgpu_dm_hpd_fini(adev);
2362 
2363 	amdgpu_dm_irq_fini(adev);
2364 	amdgpu_dm_fini(adev);
2365 	return 0;
2366 }
2367 
2368 
2369 static int dm_enable_vblank(struct drm_crtc *crtc);
2370 static void dm_disable_vblank(struct drm_crtc *crtc);
2371 
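/*
 * Enable or disable the pflip and vblank interrupts for every stream in
 * @state that has at least one plane; used around GPU reset to quiesce and
 * later restore display interrupts.
 */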
2372 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2373 				 struct dc_state *state, bool enable)
2374 {
2375 	enum dc_irq_source irq_source;
2376 	struct amdgpu_crtc *acrtc;
2377 	int rc = -EBUSY;
2378 	int i = 0;
2379 
2380 	for (i = 0; i < state->stream_count; i++) {
2381 		acrtc = get_crtc_by_otg_inst(
2382 				adev, state->stream_status[i].primary_otg_inst);
2383 
2384 		if (acrtc && state->stream_status[i].plane_count != 0) {
2385 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2386 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2387 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2388 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2389 			if (rc)
2390 				DRM_WARN("Failed to %s pflip interrupts\n",
2391 					 enable ? "enable" : "disable");
2392 
2393 			if (enable) {
2394 				rc = dm_enable_vblank(&acrtc->base);
2395 				if (rc)
2396 					DRM_WARN("Failed to enable vblank interrupts\n");
2397 			} else {
2398 				dm_disable_vblank(&acrtc->base);
2399 			}
2400 
2401 		}
2402 	}
2403 
2404 }
2405 
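/*
 * Build a copy of the current DC state, strip all planes and streams from it,
 * and commit the resulting empty state, effectively blanking every pipe.
 */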
2406 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2407 {
2408 	struct dc_state *context = NULL;
2409 	enum dc_status res = DC_ERROR_UNEXPECTED;
2410 	int i;
2411 	struct dc_stream_state *del_streams[MAX_PIPES];
2412 	int del_streams_count = 0;
2413 
2414 	memset(del_streams, 0, sizeof(del_streams));
2415 
2416 	context = dc_create_state(dc);
2417 	if (context == NULL)
2418 		goto context_alloc_fail;
2419 
2420 	dc_resource_state_copy_construct_current(dc, context);
2421 
2422 	/* First remove from context all streams */
2423 	for (i = 0; i < context->stream_count; i++) {
2424 		struct dc_stream_state *stream = context->streams[i];
2425 
2426 		del_streams[del_streams_count++] = stream;
2427 	}
2428 
2429 	/* Remove all planes for removed streams and then remove the streams */
2430 	for (i = 0; i < del_streams_count; i++) {
2431 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2432 			res = DC_FAIL_DETACH_SURFACES;
2433 			goto fail;
2434 		}
2435 
2436 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2437 		if (res != DC_OK)
2438 			goto fail;
2439 	}
2440 
2441 	res = dc_commit_state(dc, context);
2442 
2443 fail:
2444 	dc_release_state(context);
2445 
2446 context_alloc_fail:
2447 	return res;
2448 }
2449 
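/* Flush any pending HPD RX offload work so none of it runs during suspend. */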
2450 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2451 {
2452 	int i;
2453 
2454 	if (dm->hpd_rx_offload_wq) {
2455 		for (i = 0; i < dm->dc->caps.max_links; i++)
2456 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2457 	}
2458 }
2459 
2460 static int dm_suspend(void *handle)
2461 {
2462 	struct amdgpu_device *adev = handle;
2463 	struct amdgpu_display_manager *dm = &adev->dm;
2464 	int ret = 0;
2465 
2466 	if (amdgpu_in_reset(adev)) {
2467 		mutex_lock(&dm->dc_lock);
2468 
2469 		dc_allow_idle_optimizations(adev->dm.dc, false);
2470 
2471 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2472 
2473 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2474 
2475 		amdgpu_dm_commit_zero_streams(dm->dc);
2476 
2477 		amdgpu_dm_irq_suspend(adev);
2478 
2479 		hpd_rx_irq_work_suspend(dm);
2480 
2481 		return ret;
2482 	}
2483 
2484 	WARN_ON(adev->dm.cached_state);
2485 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2486 
2487 	s3_handle_mst(adev_to_drm(adev), true);
2488 
2489 	amdgpu_dm_irq_suspend(adev);
2490 
2491 	hpd_rx_irq_work_suspend(dm);
2492 
2493 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2494 
2495 	return 0;
2496 }
2497 
2498 struct amdgpu_dm_connector *
2499 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2500 					     struct drm_crtc *crtc)
2501 {
2502 	uint32_t i;
2503 	struct drm_connector_state *new_con_state;
2504 	struct drm_connector *connector;
2505 	struct drm_crtc *crtc_from_state;
2506 
2507 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2508 		crtc_from_state = new_con_state->crtc;
2509 
2510 		if (crtc_from_state == crtc)
2511 			return to_amdgpu_dm_connector(connector);
2512 	}
2513 
2514 	return NULL;
2515 }
2516 
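/*
 * Emulate link detection for a forced connector when no physical sink is
 * present: pick sink capabilities from the connector signal type, create a
 * local sink and try to read an EDID for it.
 */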
2517 static void emulated_link_detect(struct dc_link *link)
2518 {
2519 	struct dc_sink_init_data sink_init_data = { 0 };
2520 	struct display_sink_capability sink_caps = { 0 };
2521 	enum dc_edid_status edid_status;
2522 	struct dc_context *dc_ctx = link->ctx;
2523 	struct dc_sink *sink = NULL;
2524 	struct dc_sink *prev_sink = NULL;
2525 
2526 	link->type = dc_connection_none;
2527 	prev_sink = link->local_sink;
2528 
2529 	if (prev_sink)
2530 		dc_sink_release(prev_sink);
2531 
2532 	switch (link->connector_signal) {
2533 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2534 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2535 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2536 		break;
2537 	}
2538 
2539 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2540 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2541 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2542 		break;
2543 	}
2544 
2545 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2546 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2547 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2548 		break;
2549 	}
2550 
2551 	case SIGNAL_TYPE_LVDS: {
2552 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2553 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2554 		break;
2555 	}
2556 
2557 	case SIGNAL_TYPE_EDP: {
2558 		sink_caps.transaction_type =
2559 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2560 		sink_caps.signal = SIGNAL_TYPE_EDP;
2561 		break;
2562 	}
2563 
2564 	case SIGNAL_TYPE_DISPLAY_PORT: {
2565 		sink_caps.transaction_type =
2566 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2567 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2568 		break;
2569 	}
2570 
2571 	default:
2572 		DC_ERROR("Invalid connector type! signal:%d\n",
2573 			link->connector_signal);
2574 		return;
2575 	}
2576 
2577 	sink_init_data.link = link;
2578 	sink_init_data.sink_signal = sink_caps.signal;
2579 
2580 	sink = dc_sink_create(&sink_init_data);
2581 	if (!sink) {
2582 		DC_ERROR("Failed to create sink!\n");
2583 		return;
2584 	}
2585 
2586 	/* dc_sink_create returns a new reference */
2587 	link->local_sink = sink;
2588 
2589 	edid_status = dm_helpers_read_local_edid(
2590 			link->ctx,
2591 			link,
2592 			sink);
2593 
2594 	if (edid_status != EDID_OK)
2595 		DC_ERROR("Failed to read EDID");
2596 
2597 }
2598 
2599 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2600 				     struct amdgpu_display_manager *dm)
2601 {
2602 	struct {
2603 		struct dc_surface_update surface_updates[MAX_SURFACES];
2604 		struct dc_plane_info plane_infos[MAX_SURFACES];
2605 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2606 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2607 		struct dc_stream_update stream_update;
2608 	} *bundle;
2609 	int k, m;
2610 
2611 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2612 
2613 	if (!bundle) {
2614 		dm_error("Failed to allocate update bundle\n");
2615 		goto cleanup;
2616 	}
2617 
2618 	for (k = 0; k < dc_state->stream_count; k++) {
2619 		bundle->stream_update.stream = dc_state->streams[k];
2620 
2621 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2622 			bundle->surface_updates[m].surface =
2623 				dc_state->stream_status->plane_states[m];
2624 			bundle->surface_updates[m].surface->force_full_update =
2625 				true;
2626 		}
2627 		dc_commit_updates_for_stream(
2628 			dm->dc, bundle->surface_updates,
2629 			dc_state->stream_status->plane_count,
2630 			dc_state->streams[k], &bundle->stream_update, dc_state);
2631 	}
2632 
2633 cleanup:
2634 	kfree(bundle);
2635 
2636 	return;
2637 }
2638 
2639 static int dm_resume(void *handle)
2640 {
2641 	struct amdgpu_device *adev = handle;
2642 	struct drm_device *ddev = adev_to_drm(adev);
2643 	struct amdgpu_display_manager *dm = &adev->dm;
2644 	struct amdgpu_dm_connector *aconnector;
2645 	struct drm_connector *connector;
2646 	struct drm_connector_list_iter iter;
2647 	struct drm_crtc *crtc;
2648 	struct drm_crtc_state *new_crtc_state;
2649 	struct dm_crtc_state *dm_new_crtc_state;
2650 	struct drm_plane *plane;
2651 	struct drm_plane_state *new_plane_state;
2652 	struct dm_plane_state *dm_new_plane_state;
2653 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2654 	enum dc_connection_type new_connection_type = dc_connection_none;
2655 	struct dc_state *dc_state;
2656 	int i, r, j;
2657 
2658 	if (amdgpu_in_reset(adev)) {
2659 		dc_state = dm->cached_dc_state;
2660 
2661 		/*
2662 		 * The dc->current_state is backed up into dm->cached_dc_state
2663 		 * before we commit 0 streams.
2664 		 *
2665 		 * DC will clear link encoder assignments on the real state
2666 		 * but the changes won't propagate over to the copy we made
2667 		 * before the 0 streams commit.
2668 		 *
2669 		 * DC expects that link encoder assignments are *not* valid
2670 		 * when committing a state, so as a workaround we can copy
2671 		 * off of the current state.
2672 		 *
2673 		 * We lose the previous assignments, but we had already
2674 		 * committed 0 streams anyway.
2675 		 */
2676 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2677 
2678 		r = dm_dmub_hw_init(adev);
2679 		if (r)
2680 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2681 
2682 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2683 		dc_resume(dm->dc);
2684 
2685 		amdgpu_dm_irq_resume_early(adev);
2686 
2687 		for (i = 0; i < dc_state->stream_count; i++) {
2688 			dc_state->streams[i]->mode_changed = true;
2689 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2690 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2691 					= 0xffffffff;
2692 			}
2693 		}
2694 
2695 		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2696 			amdgpu_dm_outbox_init(adev);
2697 			dc_enable_dmub_outbox(adev->dm.dc);
2698 		}
2699 
2700 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2701 
2702 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2703 
2704 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2705 
2706 		dc_release_state(dm->cached_dc_state);
2707 		dm->cached_dc_state = NULL;
2708 
2709 		amdgpu_dm_irq_resume_late(adev);
2710 
2711 		mutex_unlock(&dm->dc_lock);
2712 
2713 		return 0;
2714 	}
2715 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2716 	dc_release_state(dm_state->context);
2717 	dm_state->context = dc_create_state(dm->dc);
2718 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2719 	dc_resource_state_construct(dm->dc, dm_state->context);
2720 
2721 	/* Before powering on DC we need to re-initialize DMUB. */
2722 	dm_dmub_hw_resume(adev);
2723 
2724 	/* Re-enable outbox interrupts for DPIA. */
2725 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2726 		amdgpu_dm_outbox_init(adev);
2727 		dc_enable_dmub_outbox(adev->dm.dc);
2728 	}
2729 
2730 	/* power on hardware */
2731 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2732 
2733 	/* program HPD filter */
2734 	dc_resume(dm->dc);
2735 
2736 	/*
2737 	 * early enable HPD Rx IRQ, should be done before set mode as short
2738 	 * pulse interrupts are used for MST
2739 	 */
2740 	amdgpu_dm_irq_resume_early(adev);
2741 
2742 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2743 	s3_handle_mst(ddev, false);
2744 
2745 	/* Do detection */
2746 	drm_connector_list_iter_begin(ddev, &iter);
2747 	drm_for_each_connector_iter(connector, &iter) {
2748 		aconnector = to_amdgpu_dm_connector(connector);
2749 
2750 		/*
2751 		 * this is the case when traversing through already created
2752 		 * MST connectors, should be skipped
2753 		 */
2754 		if (aconnector->dc_link &&
2755 		    aconnector->dc_link->type == dc_connection_mst_branch)
2756 			continue;
2757 
2758 		mutex_lock(&aconnector->hpd_lock);
2759 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2760 			DRM_ERROR("KMS: Failed to detect connector\n");
2761 
2762 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2763 			emulated_link_detect(aconnector->dc_link);
2764 		} else {
2765 			mutex_lock(&dm->dc_lock);
2766 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2767 			mutex_unlock(&dm->dc_lock);
2768 		}
2769 
2770 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2771 			aconnector->fake_enable = false;
2772 
2773 		if (aconnector->dc_sink)
2774 			dc_sink_release(aconnector->dc_sink);
2775 		aconnector->dc_sink = NULL;
2776 		amdgpu_dm_update_connector_after_detect(aconnector);
2777 		mutex_unlock(&aconnector->hpd_lock);
2778 	}
2779 	drm_connector_list_iter_end(&iter);
2780 
2781 	/* Force mode set in atomic commit */
2782 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2783 		new_crtc_state->active_changed = true;
2784 
2785 	/*
2786 	 * atomic_check is expected to create the dc states. We need to release
2787 	 * them here, since they were duplicated as part of the suspend
2788 	 * procedure.
2789 	 */
2790 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2791 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2792 		if (dm_new_crtc_state->stream) {
2793 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2794 			dc_stream_release(dm_new_crtc_state->stream);
2795 			dm_new_crtc_state->stream = NULL;
2796 		}
2797 	}
2798 
2799 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2800 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2801 		if (dm_new_plane_state->dc_state) {
2802 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2803 			dc_plane_state_release(dm_new_plane_state->dc_state);
2804 			dm_new_plane_state->dc_state = NULL;
2805 		}
2806 	}
2807 
2808 	drm_atomic_helper_resume(ddev, dm->cached_state);
2809 
2810 	dm->cached_state = NULL;
2811 
2812 	amdgpu_dm_irq_resume_late(adev);
2813 
2814 	amdgpu_dm_smu_write_watermarks_table(adev);
2815 
2816 	return 0;
2817 }
2818 
2819 /**
2820  * DOC: DM Lifecycle
2821  *
2822  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2823  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2824  * the base driver's device list to be initialized and torn down accordingly.
2825  *
2826  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2827  */
2828 
2829 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2830 	.name = "dm",
2831 	.early_init = dm_early_init,
2832 	.late_init = dm_late_init,
2833 	.sw_init = dm_sw_init,
2834 	.sw_fini = dm_sw_fini,
2835 	.early_fini = amdgpu_dm_early_fini,
2836 	.hw_init = dm_hw_init,
2837 	.hw_fini = dm_hw_fini,
2838 	.suspend = dm_suspend,
2839 	.resume = dm_resume,
2840 	.is_idle = dm_is_idle,
2841 	.wait_for_idle = dm_wait_for_idle,
2842 	.check_soft_reset = dm_check_soft_reset,
2843 	.soft_reset = dm_soft_reset,
2844 	.set_clockgating_state = dm_set_clockgating_state,
2845 	.set_powergating_state = dm_set_powergating_state,
2846 };
2847 
2848 const struct amdgpu_ip_block_version dm_ip_block =
2849 {
2850 	.type = AMD_IP_BLOCK_TYPE_DCE,
2851 	.major = 1,
2852 	.minor = 0,
2853 	.rev = 0,
2854 	.funcs = &amdgpu_dm_funcs,
2855 };
2856 
2857 
2858 /**
2859  * DOC: atomic
2860  *
2861  * *WIP*
2862  */
2863 
2864 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2865 	.fb_create = amdgpu_display_user_framebuffer_create,
2866 	.get_format_info = amd_get_format_info,
2867 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2868 	.atomic_check = amdgpu_dm_atomic_check,
2869 	.atomic_commit = drm_atomic_helper_commit,
2870 };
2871 
2872 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2873 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2874 };
2875 
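/*
 * Derive eDP backlight capabilities for the connector: pick up the DPCD sink
 * extended caps (OLED/aux backlight control) and convert the HDR metadata
 * (max_fall, min_cll) into the aux min/max input signal range.
 */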
2876 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2877 {
2878 	u32 max_avg, min_cll, max, min, q, r;
2879 	struct amdgpu_dm_backlight_caps *caps;
2880 	struct amdgpu_display_manager *dm;
2881 	struct drm_connector *conn_base;
2882 	struct amdgpu_device *adev;
2883 	struct dc_link *link = NULL;
2884 	static const u8 pre_computed_values[] = {
2885 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2886 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2887 	int i;
2888 
2889 	if (!aconnector || !aconnector->dc_link)
2890 		return;
2891 
2892 	link = aconnector->dc_link;
2893 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2894 		return;
2895 
2896 	conn_base = &aconnector->base;
2897 	adev = drm_to_adev(conn_base->dev);
2898 	dm = &adev->dm;
2899 	for (i = 0; i < dm->num_of_edps; i++) {
2900 		if (link == dm->backlight_link[i])
2901 			break;
2902 	}
2903 	if (i >= dm->num_of_edps)
2904 		return;
2905 	caps = &dm->backlight_caps[i];
2906 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2907 	caps->aux_support = false;
2908 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2909 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2910 
2911 	if (caps->ext_caps->bits.oled == 1 /*||
2912 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2913 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2914 		caps->aux_support = true;
2915 
2916 	if (amdgpu_backlight == 0)
2917 		caps->aux_support = false;
2918 	else if (amdgpu_backlight == 1)
2919 		caps->aux_support = true;
2920 
2921 	/* From the specification (CTA-861-G), for calculating the maximum
2922 	 * luminance we need to use:
2923 	 *	Luminance = 50*2**(CV/32)
2924 	 * Where CV is a one-byte value.
2925 	 * Evaluating this expression would require floating point precision;
2926 	 * to avoid that complexity, we take advantage of the fact that CV is
2927 	 * divided by a constant. From Euclid's division algorithm, we know
2928 	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in
2929 	 * the Luminance expression and get 50*(2**q)*(2**(r/32)), hence we
2930 	 * just need to pre-compute the values of 50*(2**(r/32)). For
2931 	 * pre-computing the values we used the following Ruby line:
2932 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2933 	 * The results of the above expression can be verified against
2934 	 * pre_computed_values.
2935 	 */
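	/*
	 * For example, max_avg = 75 gives q = 2, r = 11, so
	 * max = 4 * pre_computed_values[11] = 4 * 63 = 252, close to the
	 * exact value 50*2**(75/32) ~= 254.
	 */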
2936 	q = max_avg >> 5;
2937 	r = max_avg % 32;
2938 	max = (1 << q) * pre_computed_values[r];
2939 
2940 	/* min luminance: maxLum * (CV/255)^2 / 100 */
2941 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2942 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2943 
2944 	caps->aux_max_input_signal = max;
2945 	caps->aux_min_input_signal = min;
2946 }
2947 
2948 void amdgpu_dm_update_connector_after_detect(
2949 		struct amdgpu_dm_connector *aconnector)
2950 {
2951 	struct drm_connector *connector = &aconnector->base;
2952 	struct drm_device *dev = connector->dev;
2953 	struct dc_sink *sink;
2954 
2955 	/* MST handled by drm_mst framework */
2956 	if (aconnector->mst_mgr.mst_state)
2957 		return;
2958 
2959 	sink = aconnector->dc_link->local_sink;
2960 	if (sink)
2961 		dc_sink_retain(sink);
2962 
2963 	/*
2964 	 * An EDID-managed connector gets its first update only in the mode_valid hook;
2965 	 * the connector sink is then set to either a fake or a physical sink, depending on link status.
2966 	 * Skip if already done during boot.
2967 	 */
2968 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2969 			&& aconnector->dc_em_sink) {
2970 
2971 		/*
2972 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake
2973 		 * a stream, because on resume connector->sink is set to NULL
2974 		 */
2975 		mutex_lock(&dev->mode_config.mutex);
2976 
2977 		if (sink) {
2978 			if (aconnector->dc_sink) {
2979 				amdgpu_dm_update_freesync_caps(connector, NULL);
2980 				/*
2981 				 * retain and release below are used to
2982 				 * bump up the refcount for the sink because the link doesn't
2983 				 * point to it anymore after disconnect, so on the next crtc to
2984 				 * connector reshuffle by UMD we would get an unwanted dc_sink release
2985 				 */
2986 				dc_sink_release(aconnector->dc_sink);
2987 			}
2988 			aconnector->dc_sink = sink;
2989 			dc_sink_retain(aconnector->dc_sink);
2990 			amdgpu_dm_update_freesync_caps(connector,
2991 					aconnector->edid);
2992 		} else {
2993 			amdgpu_dm_update_freesync_caps(connector, NULL);
2994 			if (!aconnector->dc_sink) {
2995 				aconnector->dc_sink = aconnector->dc_em_sink;
2996 				dc_sink_retain(aconnector->dc_sink);
2997 			}
2998 		}
2999 
3000 		mutex_unlock(&dev->mode_config.mutex);
3001 
3002 		if (sink)
3003 			dc_sink_release(sink);
3004 		return;
3005 	}
3006 
3007 	/*
3008 	 * TODO: temporary guard to look for proper fix
3009 	 * if this sink is MST sink, we should not do anything
3010 	 */
3011 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3012 		dc_sink_release(sink);
3013 		return;
3014 	}
3015 
3016 	if (aconnector->dc_sink == sink) {
3017 		/*
3018 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3019 		 * Do nothing!!
3020 		 */
3021 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3022 				aconnector->connector_id);
3023 		if (sink)
3024 			dc_sink_release(sink);
3025 		return;
3026 	}
3027 
3028 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3029 		aconnector->connector_id, aconnector->dc_sink, sink);
3030 
3031 	mutex_lock(&dev->mode_config.mutex);
3032 
3033 	/*
3034 	 * 1. Update status of the drm connector
3035 	 * 2. Send an event and let userspace tell us what to do
3036 	 */
3037 	if (sink) {
3038 		/*
3039 		 * TODO: check if we still need the S3 mode update workaround.
3040 		 * If yes, put it here.
3041 		 */
3042 		if (aconnector->dc_sink) {
3043 			amdgpu_dm_update_freesync_caps(connector, NULL);
3044 			dc_sink_release(aconnector->dc_sink);
3045 		}
3046 
3047 		aconnector->dc_sink = sink;
3048 		dc_sink_retain(aconnector->dc_sink);
3049 		if (sink->dc_edid.length == 0) {
3050 			aconnector->edid = NULL;
3051 			if (aconnector->dc_link->aux_mode) {
3052 				drm_dp_cec_unset_edid(
3053 					&aconnector->dm_dp_aux.aux);
3054 			}
3055 		} else {
3056 			aconnector->edid =
3057 				(struct edid *)sink->dc_edid.raw_edid;
3058 
3059 			if (aconnector->dc_link->aux_mode)
3060 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3061 						    aconnector->edid);
3062 		}
3063 
3064 		drm_connector_update_edid_property(connector, aconnector->edid);
3065 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3066 		update_connector_ext_caps(aconnector);
3067 	} else {
3068 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3069 		amdgpu_dm_update_freesync_caps(connector, NULL);
3070 		drm_connector_update_edid_property(connector, NULL);
3071 		aconnector->num_modes = 0;
3072 		dc_sink_release(aconnector->dc_sink);
3073 		aconnector->dc_sink = NULL;
3074 		aconnector->edid = NULL;
3075 #ifdef CONFIG_DRM_AMD_DC_HDCP
3076 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3077 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3078 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3079 #endif
3080 	}
3081 
3082 	mutex_unlock(&dev->mode_config.mutex);
3083 
3084 	update_subconnector_property(aconnector);
3085 
3086 	if (sink)
3087 		dc_sink_release(sink);
3088 }
3089 
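/*
 * Handle a long HPD pulse for one connector: re-run link detection (or the
 * emulated detection for forced connectors), update the connector state and
 * send a hotplug event to userspace.
 */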
3090 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3091 {
3092 	struct drm_connector *connector = &aconnector->base;
3093 	struct drm_device *dev = connector->dev;
3094 	enum dc_connection_type new_connection_type = dc_connection_none;
3095 	struct amdgpu_device *adev = drm_to_adev(dev);
3096 #ifdef CONFIG_DRM_AMD_DC_HDCP
3097 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3098 #endif
3099 	bool ret = false;
3100 
3101 	if (adev->dm.disable_hpd_irq)
3102 		return;
3103 
3104 	/*
3105 	 * In case of failure or MST there is no need to update the connector status
3106 	 * or notify the OS, since (in the MST case) MST does this in its own context.
3107 	 */
3108 	mutex_lock(&aconnector->hpd_lock);
3109 
3110 #ifdef CONFIG_DRM_AMD_DC_HDCP
3111 	if (adev->dm.hdcp_workqueue) {
3112 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3113 		dm_con_state->update_hdcp = true;
3114 	}
3115 #endif
3116 	if (aconnector->fake_enable)
3117 		aconnector->fake_enable = false;
3118 
3119 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3120 		DRM_ERROR("KMS: Failed to detect connector\n");
3121 
3122 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3123 		emulated_link_detect(aconnector->dc_link);
3124 
3125 		drm_modeset_lock_all(dev);
3126 		dm_restore_drm_connector_state(dev, connector);
3127 		drm_modeset_unlock_all(dev);
3128 
3129 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3130 			drm_kms_helper_connector_hotplug_event(connector);
3131 	} else {
3132 		mutex_lock(&adev->dm.dc_lock);
3133 		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3134 		mutex_unlock(&adev->dm.dc_lock);
3135 		if (ret) {
3136 			amdgpu_dm_update_connector_after_detect(aconnector);
3137 
3138 			drm_modeset_lock_all(dev);
3139 			dm_restore_drm_connector_state(dev, connector);
3140 			drm_modeset_unlock_all(dev);
3141 
3142 			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3143 				drm_kms_helper_connector_hotplug_event(connector);
3144 		}
3145 	}
3146 	mutex_unlock(&aconnector->hpd_lock);
3147 
3148 }
3149 
3150 static void handle_hpd_irq(void *param)
3151 {
3152 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3153 
3154 	handle_hpd_irq_helper(aconnector);
3155 
3156 }
3157 
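/*
 * Poll the DPCD sink-count/ESI registers and hand any MST sideband IRQs to
 * the DRM MST topology manager, ACKing each one, until no new IRQ is
 * reported or max_process_count iterations have run.
 */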
3158 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3159 {
3160 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3161 	uint8_t dret;
3162 	bool new_irq_handled = false;
3163 	int dpcd_addr;
3164 	int dpcd_bytes_to_read;
3165 
3166 	const int max_process_count = 30;
3167 	int process_count = 0;
3168 
3169 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3170 
3171 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3172 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3173 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3174 		dpcd_addr = DP_SINK_COUNT;
3175 	} else {
3176 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3177 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3178 		dpcd_addr = DP_SINK_COUNT_ESI;
3179 	}
3180 
3181 	dret = drm_dp_dpcd_read(
3182 		&aconnector->dm_dp_aux.aux,
3183 		dpcd_addr,
3184 		esi,
3185 		dpcd_bytes_to_read);
3186 
3187 	while (dret == dpcd_bytes_to_read &&
3188 		process_count < max_process_count) {
3189 		uint8_t retry;
3190 		dret = 0;
3191 
3192 		process_count++;
3193 
3194 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3195 		/* handle HPD short pulse irq */
3196 		if (aconnector->mst_mgr.mst_state)
3197 			drm_dp_mst_hpd_irq(
3198 				&aconnector->mst_mgr,
3199 				esi,
3200 				&new_irq_handled);
3201 
3202 		if (new_irq_handled) {
3203 			/* ACK at DPCD to notify downstream */
3204 			const int ack_dpcd_bytes_to_write =
3205 				dpcd_bytes_to_read - 1;
3206 
3207 			for (retry = 0; retry < 3; retry++) {
3208 				uint8_t wret;
3209 
3210 				wret = drm_dp_dpcd_write(
3211 					&aconnector->dm_dp_aux.aux,
3212 					dpcd_addr + 1,
3213 					&esi[1],
3214 					ack_dpcd_bytes_to_write);
3215 				if (wret == ack_dpcd_bytes_to_write)
3216 					break;
3217 			}
3218 
3219 			/* check if there is new irq to be handled */
3220 			dret = drm_dp_dpcd_read(
3221 				&aconnector->dm_dp_aux.aux,
3222 				dpcd_addr,
3223 				esi,
3224 				dpcd_bytes_to_read);
3225 
3226 			new_irq_handled = false;
3227 		} else {
3228 			break;
3229 		}
3230 	}
3231 
3232 	if (process_count == max_process_count)
3233 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3234 }
3235 
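/*
 * Allocate an offload work item carrying the HPD IRQ data and queue it on
 * the per-link HPD RX offload workqueue.
 */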
3236 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3237 							union hpd_irq_data hpd_irq_data)
3238 {
3239 	struct hpd_rx_irq_offload_work *offload_work =
3240 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3241 
3242 	if (!offload_work) {
3243 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3244 		return;
3245 	}
3246 
3247 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3248 	offload_work->data = hpd_irq_data;
3249 	offload_work->offload_wq = offload_wq;
3250 
3251 	queue_work(offload_wq->wq, &offload_work->work);
3252 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3253 }
3254 
3255 static void handle_hpd_rx_irq(void *param)
3256 {
3257 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3258 	struct drm_connector *connector = &aconnector->base;
3259 	struct drm_device *dev = connector->dev;
3260 	struct dc_link *dc_link = aconnector->dc_link;
3261 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3262 	bool result = false;
3263 	enum dc_connection_type new_connection_type = dc_connection_none;
3264 	struct amdgpu_device *adev = drm_to_adev(dev);
3265 	union hpd_irq_data hpd_irq_data;
3266 	bool link_loss = false;
3267 	bool has_left_work = false;
3268 	int idx = aconnector->base.index;
3269 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3270 
3271 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3272 
3273 	if (adev->dm.disable_hpd_irq)
3274 		return;
3275 
3276 	/*
3277 	 * TODO: Temporary mutex to keep the hpd interrupt from hitting a gpio
3278 	 * conflict; once the i2c helper is implemented, this mutex should be
3279 	 * retired.
3280 	 */
3281 	mutex_lock(&aconnector->hpd_lock);
3282 
3283 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3284 						&link_loss, true, &has_left_work);
3285 
3286 	if (!has_left_work)
3287 		goto out;
3288 
3289 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3290 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3291 		goto out;
3292 	}
3293 
3294 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3295 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3296 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3297 			dm_handle_mst_sideband_msg(aconnector);
3298 			goto out;
3299 		}
3300 
3301 		if (link_loss) {
3302 			bool skip = false;
3303 
3304 			spin_lock(&offload_wq->offload_lock);
3305 			skip = offload_wq->is_handling_link_loss;
3306 
3307 			if (!skip)
3308 				offload_wq->is_handling_link_loss = true;
3309 
3310 			spin_unlock(&offload_wq->offload_lock);
3311 
3312 			if (!skip)
3313 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3314 
3315 			goto out;
3316 		}
3317 	}
3318 
3319 out:
3320 	if (result && !is_mst_root_connector) {
3321 		/* Downstream Port status changed. */
3322 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3323 			DRM_ERROR("KMS: Failed to detect connector\n");
3324 
3325 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3326 			emulated_link_detect(dc_link);
3327 
3328 			if (aconnector->fake_enable)
3329 				aconnector->fake_enable = false;
3330 
3331 			amdgpu_dm_update_connector_after_detect(aconnector);
3332 
3333 
3334 			drm_modeset_lock_all(dev);
3335 			dm_restore_drm_connector_state(dev, connector);
3336 			drm_modeset_unlock_all(dev);
3337 
3338 			drm_kms_helper_connector_hotplug_event(connector);
3339 		} else {
3340 			bool ret = false;
3341 
3342 			mutex_lock(&adev->dm.dc_lock);
3343 			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3344 			mutex_unlock(&adev->dm.dc_lock);
3345 
3346 			if (ret) {
3347 				if (aconnector->fake_enable)
3348 					aconnector->fake_enable = false;
3349 
3350 				amdgpu_dm_update_connector_after_detect(aconnector);
3351 
3352 				drm_modeset_lock_all(dev);
3353 				dm_restore_drm_connector_state(dev, connector);
3354 				drm_modeset_unlock_all(dev);
3355 
3356 				drm_kms_helper_connector_hotplug_event(connector);
3357 			}
3358 		}
3359 	}
3360 #ifdef CONFIG_DRM_AMD_DC_HDCP
3361 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3362 		if (adev->dm.hdcp_workqueue)
3363 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3364 	}
3365 #endif
3366 
3367 	if (dc_link->type != dc_connection_mst_branch)
3368 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3369 
3370 	mutex_unlock(&aconnector->hpd_lock);
3371 }
3372 
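/*
 * Register HPD (hotplug) and HPD RX (DP short pulse) interrupt handlers for
 * every connector that reports a valid IRQ source.
 */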
3373 static void register_hpd_handlers(struct amdgpu_device *adev)
3374 {
3375 	struct drm_device *dev = adev_to_drm(adev);
3376 	struct drm_connector *connector;
3377 	struct amdgpu_dm_connector *aconnector;
3378 	const struct dc_link *dc_link;
3379 	struct dc_interrupt_params int_params = {0};
3380 
3381 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3382 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3383 
3384 	list_for_each_entry(connector,
3385 			&dev->mode_config.connector_list, head)	{
3386 
3387 		aconnector = to_amdgpu_dm_connector(connector);
3388 		dc_link = aconnector->dc_link;
3389 
3390 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3391 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3392 			int_params.irq_source = dc_link->irq_source_hpd;
3393 
3394 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3395 					handle_hpd_irq,
3396 					(void *) aconnector);
3397 		}
3398 
3399 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3400 
3401 			/* Also register for DP short pulse (hpd_rx). */
3402 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3403 			int_params.irq_source = dc_link->irq_source_hpd_rx;
3404 
3405 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3406 					handle_hpd_rx_irq,
3407 					(void *) aconnector);
3408 
3409 			if (adev->dm.hpd_rx_offload_wq)
3410 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3411 					aconnector;
3412 		}
3413 	}
3414 }
3415 
3416 #if defined(CONFIG_DRM_AMD_DC_SI)
3417 /* Register IRQ sources and initialize IRQ callbacks */
3418 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3419 {
3420 	struct dc *dc = adev->dm.dc;
3421 	struct common_irq_params *c_irq_params;
3422 	struct dc_interrupt_params int_params = {0};
3423 	int r;
3424 	int i;
3425 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3426 
3427 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3428 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3429 
3430 	/*
3431 	 * Actions of amdgpu_irq_add_id():
3432 	 * 1. Register a set() function with base driver.
3433 	 *    Base driver will call set() function to enable/disable an
3434 	 *    interrupt in DC hardware.
3435 	 * 2. Register amdgpu_dm_irq_handler().
3436 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3437 	 *    coming from DC hardware.
3438 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3439 	 *    for acknowledging and handling. */
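
	/*
	 * Editorial sketch of the registration pattern repeated in the loops
	 * below (illustrative only, not executed code):
	 *
	 *	r = amdgpu_irq_add_id(adev, client_id, src_id, &adev->crtc_irq);
	 *	int_params.irq_source = dc_interrupt_to_irq_source(dc, src_id, 0);
	 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
	 *			dm_crtc_high_irq, c_irq_params);
	 */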
3440 
3441 	/* Use VBLANK interrupt */
3442 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3443 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3444 		if (r) {
3445 			DRM_ERROR("Failed to add crtc irq id!\n");
3446 			return r;
3447 		}
3448 
3449 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3450 		int_params.irq_source =
3451 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3452 
3453 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3454 
3455 		c_irq_params->adev = adev;
3456 		c_irq_params->irq_src = int_params.irq_source;
3457 
3458 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3459 				dm_crtc_high_irq, c_irq_params);
3460 	}
3461 
3462 	/* Use GRPH_PFLIP interrupt */
3463 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3464 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3465 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3466 		if (r) {
3467 			DRM_ERROR("Failed to add page flip irq id!\n");
3468 			return r;
3469 		}
3470 
3471 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3472 		int_params.irq_source =
3473 			dc_interrupt_to_irq_source(dc, i, 0);
3474 
3475 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3476 
3477 		c_irq_params->adev = adev;
3478 		c_irq_params->irq_src = int_params.irq_source;
3479 
3480 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3481 				dm_pflip_high_irq, c_irq_params);
3482 
3483 	}
3484 
3485 	/* HPD */
3486 	r = amdgpu_irq_add_id(adev, client_id,
3487 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3488 	if (r) {
3489 		DRM_ERROR("Failed to add hpd irq id!\n");
3490 		return r;
3491 	}
3492 
3493 	register_hpd_handlers(adev);
3494 
3495 	return 0;
3496 }
3497 #endif
3498 
3499 /* Register IRQ sources and initialize IRQ callbacks */
3500 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3501 {
3502 	struct dc *dc = adev->dm.dc;
3503 	struct common_irq_params *c_irq_params;
3504 	struct dc_interrupt_params int_params = {0};
3505 	int r;
3506 	int i;
3507 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3508 
3509 	if (adev->family >= AMDGPU_FAMILY_AI)
3510 		client_id = SOC15_IH_CLIENTID_DCE;
3511 
3512 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3513 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3514 
3515 	/*
3516 	 * Actions of amdgpu_irq_add_id():
3517 	 * 1. Register a set() function with base driver.
3518 	 *    Base driver will call set() function to enable/disable an
3519 	 *    interrupt in DC hardware.
3520 	 * 2. Register amdgpu_dm_irq_handler().
3521 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3522 	 *    coming from DC hardware.
3523 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3524 	 *    for acknowledging and handling. */
3525 
3526 	/* Use VBLANK interrupt */
3527 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3528 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3529 		if (r) {
3530 			DRM_ERROR("Failed to add crtc irq id!\n");
3531 			return r;
3532 		}
3533 
3534 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3535 		int_params.irq_source =
3536 			dc_interrupt_to_irq_source(dc, i, 0);
3537 
3538 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3539 
3540 		c_irq_params->adev = adev;
3541 		c_irq_params->irq_src = int_params.irq_source;
3542 
3543 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3544 				dm_crtc_high_irq, c_irq_params);
3545 	}
3546 
3547 	/* Use VUPDATE interrupt */
3548 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3549 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3550 		if (r) {
3551 			DRM_ERROR("Failed to add vupdate irq id!\n");
3552 			return r;
3553 		}
3554 
3555 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3556 		int_params.irq_source =
3557 			dc_interrupt_to_irq_source(dc, i, 0);
3558 
3559 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3560 
3561 		c_irq_params->adev = adev;
3562 		c_irq_params->irq_src = int_params.irq_source;
3563 
3564 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3565 				dm_vupdate_high_irq, c_irq_params);
3566 	}
3567 
3568 	/* Use GRPH_PFLIP interrupt */
3569 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3570 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3571 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3572 		if (r) {
3573 			DRM_ERROR("Failed to add page flip irq id!\n");
3574 			return r;
3575 		}
3576 
3577 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3578 		int_params.irq_source =
3579 			dc_interrupt_to_irq_source(dc, i, 0);
3580 
3581 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3582 
3583 		c_irq_params->adev = adev;
3584 		c_irq_params->irq_src = int_params.irq_source;
3585 
3586 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3587 				dm_pflip_high_irq, c_irq_params);
3588 
3589 	}
3590 
3591 	/* HPD */
3592 	r = amdgpu_irq_add_id(adev, client_id,
3593 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3594 	if (r) {
3595 		DRM_ERROR("Failed to add hpd irq id!\n");
3596 		return r;
3597 	}
3598 
3599 	register_hpd_handlers(adev);
3600 
3601 	return 0;
3602 }
3603 
3604 /* Register IRQ sources and initialize IRQ callbacks */
3605 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3606 {
3607 	struct dc *dc = adev->dm.dc;
3608 	struct common_irq_params *c_irq_params;
3609 	struct dc_interrupt_params int_params = {0};
3610 	int r;
3611 	int i;
3612 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3613 	static const unsigned int vrtl_int_srcid[] = {
3614 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3615 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3616 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3617 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3618 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3619 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3620 	};
3621 #endif
3622 
3623 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3624 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3625 
3626 	/*
3627 	 * Actions of amdgpu_irq_add_id():
3628 	 * 1. Register a set() function with base driver.
3629 	 *    Base driver will call set() function to enable/disable an
3630 	 *    interrupt in DC hardware.
3631 	 * 2. Register amdgpu_dm_irq_handler().
3632 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3633 	 *    coming from DC hardware.
3634 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3635 	 *    for acknowledging and handling.
3636 	 */
3637 
3638 	/* Use VSTARTUP interrupt */
3639 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3640 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3641 			i++) {
3642 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3643 
3644 		if (r) {
3645 			DRM_ERROR("Failed to add crtc irq id!\n");
3646 			return r;
3647 		}
3648 
3649 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3650 		int_params.irq_source =
3651 			dc_interrupt_to_irq_source(dc, i, 0);
3652 
3653 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3654 
3655 		c_irq_params->adev = adev;
3656 		c_irq_params->irq_src = int_params.irq_source;
3657 
3658 		amdgpu_dm_irq_register_interrupt(
3659 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3660 	}
3661 
3662 	/* Use otg vertical line interrupt */
3663 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3664 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3665 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3666 				vrtl_int_srcid[i], &adev->vline0_irq);
3667 
3668 		if (r) {
3669 			DRM_ERROR("Failed to add vline0 irq id!\n");
3670 			return r;
3671 		}
3672 
3673 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3674 		int_params.irq_source =
3675 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3676 
3677 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3678 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3679 			break;
3680 		}
3681 
3682 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3683 					- DC_IRQ_SOURCE_DC1_VLINE0];
3684 
3685 		c_irq_params->adev = adev;
3686 		c_irq_params->irq_src = int_params.irq_source;
3687 
3688 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3689 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3690 	}
3691 #endif
3692 
3693 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3694 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3695 	 * to trigger at end of each vblank, regardless of state of the lock,
3696 	 * matching DCE behaviour.
3697 	 */
3698 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3699 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3700 	     i++) {
3701 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3702 
3703 		if (r) {
3704 			DRM_ERROR("Failed to add vupdate irq id!\n");
3705 			return r;
3706 		}
3707 
3708 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3709 		int_params.irq_source =
3710 			dc_interrupt_to_irq_source(dc, i, 0);
3711 
3712 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3713 
3714 		c_irq_params->adev = adev;
3715 		c_irq_params->irq_src = int_params.irq_source;
3716 
3717 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3718 				dm_vupdate_high_irq, c_irq_params);
3719 	}
3720 
3721 	/* Use GRPH_PFLIP interrupt */
3722 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3723 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3724 			i++) {
3725 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3726 		if (r) {
3727 			DRM_ERROR("Failed to add page flip irq id!\n");
3728 			return r;
3729 		}
3730 
3731 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3732 		int_params.irq_source =
3733 			dc_interrupt_to_irq_source(dc, i, 0);
3734 
3735 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3736 
3737 		c_irq_params->adev = adev;
3738 		c_irq_params->irq_src = int_params.irq_source;
3739 
3740 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3741 				dm_pflip_high_irq, c_irq_params);
3742 
3743 	}
3744 
3745 	/* HPD */
3746 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3747 			&adev->hpd_irq);
3748 	if (r) {
3749 		DRM_ERROR("Failed to add hpd irq id!\n");
3750 		return r;
3751 	}
3752 
3753 	register_hpd_handlers(adev);
3754 
3755 	return 0;
3756 }
3757 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3758 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3759 {
3760 	struct dc *dc = adev->dm.dc;
3761 	struct common_irq_params *c_irq_params;
3762 	struct dc_interrupt_params int_params = {0};
3763 	int r, i;
3764 
3765 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3766 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3767 
3768 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3769 			&adev->dmub_outbox_irq);
3770 	if (r) {
3771 		DRM_ERROR("Failed to add outbox irq id!\n");
3772 		return r;
3773 	}
3774 
3775 	if (dc->ctx->dmub_srv) {
3776 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3777 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3778 		int_params.irq_source =
3779 		dc_interrupt_to_irq_source(dc, i, 0);
3780 
3781 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3782 
3783 		c_irq_params->adev = adev;
3784 		c_irq_params->irq_src = int_params.irq_source;
3785 
3786 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3787 				dm_dmub_outbox1_low_irq, c_irq_params);
3788 	}
3789 
3790 	return 0;
3791 }
3792 
3793 /*
3794  * Acquires the lock for the atomic state object and returns
3795  * the new atomic state.
3796  *
3797  * This should only be called during atomic check.
3798  */
3799 int dm_atomic_get_state(struct drm_atomic_state *state,
3800 			struct dm_atomic_state **dm_state)
3801 {
3802 	struct drm_device *dev = state->dev;
3803 	struct amdgpu_device *adev = drm_to_adev(dev);
3804 	struct amdgpu_display_manager *dm = &adev->dm;
3805 	struct drm_private_state *priv_state;
3806 
3807 	if (*dm_state)
3808 		return 0;
3809 
3810 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3811 	if (IS_ERR(priv_state))
3812 		return PTR_ERR(priv_state);
3813 
3814 	*dm_state = to_dm_atomic_state(priv_state);
3815 
3816 	return 0;
3817 }
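
/*
 * Illustrative caller sketch (editorial; assumes a call site inside atomic
 * check, as the comment above requires):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	(dm_state->context now refers to the duplicated DC state for this commit.)
 */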
3818 
3819 static struct dm_atomic_state *
3820 dm_atomic_get_new_state(struct drm_atomic_state *state)
3821 {
3822 	struct drm_device *dev = state->dev;
3823 	struct amdgpu_device *adev = drm_to_adev(dev);
3824 	struct amdgpu_display_manager *dm = &adev->dm;
3825 	struct drm_private_obj *obj;
3826 	struct drm_private_state *new_obj_state;
3827 	int i;
3828 
3829 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3830 		if (obj->funcs == dm->atomic_obj.funcs)
3831 			return to_dm_atomic_state(new_obj_state);
3832 	}
3833 
3834 	return NULL;
3835 }
3836 
3837 static struct drm_private_state *
3838 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3839 {
3840 	struct dm_atomic_state *old_state, *new_state;
3841 
3842 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3843 	if (!new_state)
3844 		return NULL;
3845 
3846 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3847 
3848 	old_state = to_dm_atomic_state(obj->state);
3849 
3850 	if (old_state && old_state->context)
3851 		new_state->context = dc_copy_state(old_state->context);
3852 
3853 	if (!new_state->context) {
3854 		kfree(new_state);
3855 		return NULL;
3856 	}
3857 
3858 	return &new_state->base;
3859 }
3860 
3861 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3862 				    struct drm_private_state *state)
3863 {
3864 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3865 
3866 	if (dm_state && dm_state->context)
3867 		dc_release_state(dm_state->context);
3868 
3869 	kfree(dm_state);
3870 }
3871 
3872 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3873 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3874 	.atomic_destroy_state = dm_atomic_destroy_state,
3875 };
3876 
3877 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3878 {
3879 	struct dm_atomic_state *state;
3880 	int r;
3881 
3882 	adev->mode_info.mode_config_initialized = true;
3883 
3884 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3885 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3886 
3887 	adev_to_drm(adev)->mode_config.max_width = 16384;
3888 	adev_to_drm(adev)->mode_config.max_height = 16384;
3889 
3890 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3891 	/* disable prefer shadow for now due to hibernation issues */
3892 	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3893 	/* indicates support for immediate flip */
3894 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3895 
3896 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3897 
3898 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3899 	if (!state)
3900 		return -ENOMEM;
3901 
3902 	state->context = dc_create_state(adev->dm.dc);
3903 	if (!state->context) {
3904 		kfree(state);
3905 		return -ENOMEM;
3906 	}
3907 
3908 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3909 
3910 	drm_atomic_private_obj_init(adev_to_drm(adev),
3911 				    &adev->dm.atomic_obj,
3912 				    &state->base,
3913 				    &dm_atomic_state_funcs);
3914 
3915 	r = amdgpu_display_modeset_create_props(adev);
3916 	if (r) {
3917 		dc_release_state(state->context);
3918 		kfree(state);
3919 		return r;
3920 	}
3921 
3922 	r = amdgpu_dm_audio_init(adev);
3923 	if (r) {
3924 		dc_release_state(state->context);
3925 		kfree(state);
3926 		return r;
3927 	}
3928 
3929 	return 0;
3930 }
3931 
3932 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3933 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3934 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3935 
3936 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3937 					    int bl_idx)
3938 {
3939 #if defined(CONFIG_ACPI)
3940 	struct amdgpu_dm_backlight_caps caps;
3941 
3942 	memset(&caps, 0, sizeof(caps));
3943 
3944 	if (dm->backlight_caps[bl_idx].caps_valid)
3945 		return;
3946 
3947 	amdgpu_acpi_get_backlight_caps(&caps);
3948 	if (caps.caps_valid) {
3949 		dm->backlight_caps[bl_idx].caps_valid = true;
3950 		if (caps.aux_support)
3951 			return;
3952 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3953 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3954 	} else {
3955 		dm->backlight_caps[bl_idx].min_input_signal =
3956 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3957 		dm->backlight_caps[bl_idx].max_input_signal =
3958 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3959 	}
3960 #else
3961 	if (dm->backlight_caps[bl_idx].aux_support)
3962 		return;
3963 
3964 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3965 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3966 #endif
3967 }
3968 
3969 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3970 				unsigned *min, unsigned *max)
3971 {
3972 	if (!caps)
3973 		return 0;
3974 
3975 	if (caps->aux_support) {
3976 		// Firmware limits are in nits, DC API wants millinits.
3977 		*max = 1000 * caps->aux_max_input_signal;
3978 		*min = 1000 * caps->aux_min_input_signal;
3979 	} else {
3980 		// Firmware limits are 8-bit, PWM control is 16-bit.
3981 		*max = 0x101 * caps->max_input_signal;
3982 		*min = 0x101 * caps->min_input_signal;
3983 	}
3984 	return 1;
3985 }
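
/*
 * Unit note (editorial, derived from the conversions above): on the AUX path
 * the firmware limits are in nits while DC expects millinits, hence the
 * factor of 1000.  On the PWM path the firmware limits are 8-bit while PWM
 * control is 16-bit, hence the factor of 0x101 (257); e.g. a
 * max_input_signal of 255 maps to 255 * 0x101 = 0xFFFF, the full 16-bit
 * range.
 */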
3986 
3987 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3988 					uint32_t brightness)
3989 {
3990 	unsigned min, max;
3991 
3992 	if (!get_brightness_range(caps, &min, &max))
3993 		return brightness;
3994 
3995 	// Rescale 0..255 to min..max
3996 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3997 				       AMDGPU_MAX_BL_LEVEL);
3998 }
3999 
4000 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4001 				      uint32_t brightness)
4002 {
4003 	unsigned min, max;
4004 
4005 	if (!get_brightness_range(caps, &min, &max))
4006 		return brightness;
4007 
4008 	if (brightness < min)
4009 		return 0;
4010 	// Rescale min..max to 0..255
4011 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4012 				 max - min);
4013 }
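
/*
 * Worked example (editorial, assuming the default caps of
 * min_input_signal = 12 and max_input_signal = 255 on the PWM path): the
 * internal range becomes 12 * 0x101 = 3084 .. 255 * 0x101 = 65535.
 * convert_brightness_from_user() then maps user level 0 to 3084 and 255 to
 * 65535, and convert_brightness_to_user() maps 65535 back to 255.
 */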
4014 
4015 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4016 					 int bl_idx,
4017 					 u32 user_brightness)
4018 {
4019 	struct amdgpu_dm_backlight_caps caps;
4020 	struct dc_link *link;
4021 	u32 brightness;
4022 	bool rc;
4023 
4024 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4025 	caps = dm->backlight_caps[bl_idx];
4026 
4027 	dm->brightness[bl_idx] = user_brightness;
4028 	/* update scratch register */
4029 	if (bl_idx == 0)
4030 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4031 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4032 	link = (struct dc_link *)dm->backlight_link[bl_idx];
4033 
4034 	/* Change brightness based on AUX property */
4035 	if (caps.aux_support) {
4036 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
4037 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4038 		if (!rc)
4039 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4040 	} else {
4041 		rc = dc_link_set_backlight_level(link, brightness, 0);
4042 		if (!rc)
4043 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4044 	}
4045 
4046 	if (rc)
4047 		dm->actual_brightness[bl_idx] = user_brightness;
4048 }
4049 
4050 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4051 {
4052 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4053 	int i;
4054 
4055 	for (i = 0; i < dm->num_of_edps; i++) {
4056 		if (bd == dm->backlight_dev[i])
4057 			break;
4058 	}
4059 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4060 		i = 0;
4061 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4062 
4063 	return 0;
4064 }
4065 
4066 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4067 					 int bl_idx)
4068 {
4069 	struct amdgpu_dm_backlight_caps caps;
4070 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4071 
4072 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4073 	caps = dm->backlight_caps[bl_idx];
4074 
4075 	if (caps.aux_support) {
4076 		u32 avg, peak;
4077 		bool rc;
4078 
4079 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4080 		if (!rc)
4081 			return dm->brightness[bl_idx];
4082 		return convert_brightness_to_user(&caps, avg);
4083 	} else {
4084 		int ret = dc_link_get_backlight_level(link);
4085 
4086 		if (ret == DC_ERROR_UNEXPECTED)
4087 			return dm->brightness[bl_idx];
4088 		return convert_brightness_to_user(&caps, ret);
4089 	}
4090 }
4091 
4092 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4093 {
4094 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4095 	int i;
4096 
4097 	for (i = 0; i < dm->num_of_edps; i++) {
4098 		if (bd == dm->backlight_dev[i])
4099 			break;
4100 	}
4101 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4102 		i = 0;
4103 	return amdgpu_dm_backlight_get_level(dm, i);
4104 }
4105 
4106 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4107 	.options = BL_CORE_SUSPENDRESUME,
4108 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4109 	.update_status	= amdgpu_dm_backlight_update_status,
4110 };
4111 
4112 static void
4113 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4114 {
4115 	char bl_name[16];
4116 	struct backlight_properties props = { 0 };
4117 
4118 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4119 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4120 
4121 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4122 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4123 	props.type = BACKLIGHT_RAW;
4124 
4125 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4126 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4127 
4128 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4129 								       adev_to_drm(dm->adev)->dev,
4130 								       dm,
4131 								       &amdgpu_dm_backlight_ops,
4132 								       &props);
4133 
4134 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4135 		DRM_ERROR("DM: Backlight registration failed!\n");
4136 	else
4137 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4138 }
4139 
4140 static int initialize_plane(struct amdgpu_display_manager *dm,
4141 			    struct amdgpu_mode_info *mode_info, int plane_id,
4142 			    enum drm_plane_type plane_type,
4143 			    const struct dc_plane_cap *plane_cap)
4144 {
4145 	struct drm_plane *plane;
4146 	unsigned long possible_crtcs;
4147 	int ret = 0;
4148 
4149 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4150 	if (!plane) {
4151 		DRM_ERROR("KMS: Failed to allocate plane\n");
4152 		return -ENOMEM;
4153 	}
4154 	plane->type = plane_type;
4155 
4156 	/*
4157 	 * HACK: IGT tests expect that the primary plane for a CRTC
4158 	 * can only have one possible CRTC. Only expose support for
4159 	 * any CRTC if they're not going to be used as a primary plane
4160 	 * for a CRTC - like overlay or underlay planes.
4161 	 */
4162 	possible_crtcs = 1 << plane_id;
4163 	if (plane_id >= dm->dc->caps.max_streams)
4164 		possible_crtcs = 0xff;
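	/*
	 * For example, on an ASIC with max_streams == 4, plane_id 2 yields
	 * possible_crtcs == 0x4 (CRTC 2 only), while an overlay plane with
	 * plane_id >= 4 may be attached to any CRTC (0xff).
	 */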
4165 
4166 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4167 
4168 	if (ret) {
4169 		DRM_ERROR("KMS: Failed to initialize plane\n");
4170 		kfree(plane);
4171 		return ret;
4172 	}
4173 
4174 	if (mode_info)
4175 		mode_info->planes[plane_id] = plane;
4176 
4177 	return ret;
4178 }
4179 
4180 
4181 static void register_backlight_device(struct amdgpu_display_manager *dm,
4182 				      struct dc_link *link)
4183 {
4184 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4185 	    link->type != dc_connection_none) {
4186 		/*
4187 		 * Even if registration fails, we should continue with
4188 		 * DM initialization because not having backlight control
4189 		 * is better than a black screen.
4190 		 */
4191 		if (!dm->backlight_dev[dm->num_of_edps])
4192 			amdgpu_dm_register_backlight_device(dm);
4193 
4194 		if (dm->backlight_dev[dm->num_of_edps]) {
4195 			dm->backlight_link[dm->num_of_edps] = link;
4196 			dm->num_of_edps++;
4197 		}
4198 	}
4199 }
4200 
4201 
4202 /*
4203  * In this architecture, the association
4204  * connector -> encoder -> crtc
4205  * is not really required. The crtc and connector will hold the
4206  * display_index as an abstraction to use with the DAL component.
4207  *
4208  * Returns 0 on success
4209  */
4210 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4211 {
4212 	struct amdgpu_display_manager *dm = &adev->dm;
4213 	int32_t i;
4214 	struct amdgpu_dm_connector *aconnector = NULL;
4215 	struct amdgpu_encoder *aencoder = NULL;
4216 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4217 	uint32_t link_cnt;
4218 	int32_t primary_planes;
4219 	enum dc_connection_type new_connection_type = dc_connection_none;
4220 	const struct dc_plane_cap *plane;
4221 	bool psr_feature_enabled = false;
4222 
4223 	dm->display_indexes_num = dm->dc->caps.max_streams;
4224 	/* Update the actually used number of crtcs */
4225 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4226 
4227 	link_cnt = dm->dc->caps.max_links;
4228 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4229 		DRM_ERROR("DM: Failed to initialize mode config\n");
4230 		return -EINVAL;
4231 	}
4232 
4233 	/* There is one primary plane per CRTC */
4234 	primary_planes = dm->dc->caps.max_streams;
4235 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4236 
4237 	/*
4238 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4239 	 * Order is reversed to match iteration order in atomic check.
4240 	 */
4241 	for (i = (primary_planes - 1); i >= 0; i--) {
4242 		plane = &dm->dc->caps.planes[i];
4243 
4244 		if (initialize_plane(dm, mode_info, i,
4245 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4246 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4247 			goto fail;
4248 		}
4249 	}
4250 
4251 	/*
4252 	 * Initialize overlay planes, index starting after primary planes.
4253 	 * These planes have a higher DRM index than the primary planes since
4254 	 * they should be considered as having a higher z-order.
4255 	 * Order is reversed to match iteration order in atomic check.
4256 	 *
4257 	 * Only support DCN for now, and only expose one so we don't encourage
4258 	 * userspace to use up all the pipes.
4259 	 */
4260 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4261 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4262 
4263 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4264 			continue;
4265 
4266 		if (!plane->blends_with_above || !plane->blends_with_below)
4267 			continue;
4268 
4269 		if (!plane->pixel_format_support.argb8888)
4270 			continue;
4271 
4272 		if (initialize_plane(dm, NULL, primary_planes + i,
4273 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4274 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4275 			goto fail;
4276 		}
4277 
4278 		/* Only create one overlay plane. */
4279 		break;
4280 	}
4281 
4282 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4283 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4284 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4285 			goto fail;
4286 		}
4287 
4288 	/* Use Outbox interrupt */
4289 	switch (adev->ip_versions[DCE_HWIP][0]) {
4290 	case IP_VERSION(3, 0, 0):
4291 	case IP_VERSION(3, 1, 2):
4292 	case IP_VERSION(3, 1, 3):
4293 	case IP_VERSION(3, 1, 5):
4294 	case IP_VERSION(3, 1, 6):
4295 	case IP_VERSION(3, 2, 0):
4296 	case IP_VERSION(3, 2, 1):
4297 	case IP_VERSION(2, 1, 0):
4298 		if (register_outbox_irq_handlers(dm->adev)) {
4299 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4300 			goto fail;
4301 		}
4302 		break;
4303 	default:
4304 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4305 			      adev->ip_versions[DCE_HWIP][0]);
4306 	}
4307 
4308 	/* Determine whether to enable PSR support by default. */
4309 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4310 		switch (adev->ip_versions[DCE_HWIP][0]) {
4311 		case IP_VERSION(3, 1, 2):
4312 		case IP_VERSION(3, 1, 3):
4313 		case IP_VERSION(3, 1, 5):
4314 		case IP_VERSION(3, 1, 6):
4315 		case IP_VERSION(3, 2, 0):
4316 		case IP_VERSION(3, 2, 1):
4317 			psr_feature_enabled = true;
4318 			break;
4319 		default:
4320 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4321 			break;
4322 		}
4323 	}
4324 
4325 	/* loops over all connectors on the board */
4326 	for (i = 0; i < link_cnt; i++) {
4327 		struct dc_link *link = NULL;
4328 
4329 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4330 			DRM_ERROR(
4331 				"KMS: Cannot support more than %d display indexes\n",
4332 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4333 			continue;
4334 		}
4335 
4336 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4337 		if (!aconnector)
4338 			goto fail;
4339 
4340 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4341 		if (!aencoder)
4342 			goto fail;
4343 
4344 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4345 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4346 			goto fail;
4347 		}
4348 
4349 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4350 			DRM_ERROR("KMS: Failed to initialize connector\n");
4351 			goto fail;
4352 		}
4353 
4354 		link = dc_get_link_at_index(dm->dc, i);
4355 
4356 		if (!dc_link_detect_sink(link, &new_connection_type))
4357 			DRM_ERROR("KMS: Failed to detect connector\n");
4358 
4359 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4360 			emulated_link_detect(link);
4361 			amdgpu_dm_update_connector_after_detect(aconnector);
4362 		} else {
4363 			bool ret = false;
4364 
4365 			mutex_lock(&dm->dc_lock);
4366 			ret = dc_link_detect(link, DETECT_REASON_BOOT);
4367 			mutex_unlock(&dm->dc_lock);
4368 
4369 			if (ret) {
4370 				amdgpu_dm_update_connector_after_detect(aconnector);
4371 				register_backlight_device(dm, link);
4372 
4373 				if (dm->num_of_edps)
4374 					update_connector_ext_caps(aconnector);
4375 
4376 				if (psr_feature_enabled)
4377 					amdgpu_dm_set_psr_caps(link);
4378 
4379 				/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4380 				 * PSR is also supported.
4381 				 */
4382 				if (link->psr_settings.psr_feature_enabled)
4383 					adev_to_drm(adev)->vblank_disable_immediate = false;
4384 			}
4385 		}
4386 	}
4387 
4388 	/* Software is initialized. Now we can register interrupt handlers. */
4389 	switch (adev->asic_type) {
4390 #if defined(CONFIG_DRM_AMD_DC_SI)
4391 	case CHIP_TAHITI:
4392 	case CHIP_PITCAIRN:
4393 	case CHIP_VERDE:
4394 	case CHIP_OLAND:
4395 		if (dce60_register_irq_handlers(dm->adev)) {
4396 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4397 			goto fail;
4398 		}
4399 		break;
4400 #endif
4401 	case CHIP_BONAIRE:
4402 	case CHIP_HAWAII:
4403 	case CHIP_KAVERI:
4404 	case CHIP_KABINI:
4405 	case CHIP_MULLINS:
4406 	case CHIP_TONGA:
4407 	case CHIP_FIJI:
4408 	case CHIP_CARRIZO:
4409 	case CHIP_STONEY:
4410 	case CHIP_POLARIS11:
4411 	case CHIP_POLARIS10:
4412 	case CHIP_POLARIS12:
4413 	case CHIP_VEGAM:
4414 	case CHIP_VEGA10:
4415 	case CHIP_VEGA12:
4416 	case CHIP_VEGA20:
4417 		if (dce110_register_irq_handlers(dm->adev)) {
4418 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4419 			goto fail;
4420 		}
4421 		break;
4422 	default:
4423 		switch (adev->ip_versions[DCE_HWIP][0]) {
4424 		case IP_VERSION(1, 0, 0):
4425 		case IP_VERSION(1, 0, 1):
4426 		case IP_VERSION(2, 0, 2):
4427 		case IP_VERSION(2, 0, 3):
4428 		case IP_VERSION(2, 0, 0):
4429 		case IP_VERSION(2, 1, 0):
4430 		case IP_VERSION(3, 0, 0):
4431 		case IP_VERSION(3, 0, 2):
4432 		case IP_VERSION(3, 0, 3):
4433 		case IP_VERSION(3, 0, 1):
4434 		case IP_VERSION(3, 1, 2):
4435 		case IP_VERSION(3, 1, 3):
4436 		case IP_VERSION(3, 1, 5):
4437 		case IP_VERSION(3, 1, 6):
4438 		case IP_VERSION(3, 2, 0):
4439 		case IP_VERSION(3, 2, 1):
4440 			if (dcn10_register_irq_handlers(dm->adev)) {
4441 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4442 				goto fail;
4443 			}
4444 			break;
4445 		default:
4446 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4447 					adev->ip_versions[DCE_HWIP][0]);
4448 			goto fail;
4449 		}
4450 		break;
4451 	}
4452 
4453 	return 0;
4454 fail:
4455 	kfree(aencoder);
4456 	kfree(aconnector);
4457 
4458 	return -EINVAL;
4459 }
4460 
4461 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4462 {
4463 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4464 	return;
4465 }
4466 
4467 /******************************************************************************
4468  * amdgpu_display_funcs functions
4469  *****************************************************************************/
4470 
4471 /*
4472  * dm_bandwidth_update - program display watermarks
4473  *
4474  * @adev: amdgpu_device pointer
4475  *
4476  * Calculate and program the display watermarks and line buffer allocation.
4477  */
4478 static void dm_bandwidth_update(struct amdgpu_device *adev)
4479 {
4480 	/* TODO: implement later */
4481 }
4482 
4483 static const struct amdgpu_display_funcs dm_display_funcs = {
4484 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4485 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4486 	.backlight_set_level = NULL, /* never called for DC */
4487 	.backlight_get_level = NULL, /* never called for DC */
4488 	.hpd_sense = NULL,/* called unconditionally */
4489 	.hpd_set_polarity = NULL, /* called unconditionally */
4490 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4491 	.page_flip_get_scanoutpos =
4492 		dm_crtc_get_scanoutpos,/* called unconditionally */
4493 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4494 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4495 };
4496 
4497 #if defined(CONFIG_DEBUG_KERNEL_DC)
4498 
4499 static ssize_t s3_debug_store(struct device *device,
4500 			      struct device_attribute *attr,
4501 			      const char *buf,
4502 			      size_t count)
4503 {
4504 	int ret;
4505 	int s3_state;
4506 	struct drm_device *drm_dev = dev_get_drvdata(device);
4507 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4508 
4509 	ret = kstrtoint(buf, 0, &s3_state);
4510 
4511 	if (ret == 0) {
4512 		if (s3_state) {
4513 			dm_resume(adev);
4514 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4515 		} else
4516 			dm_suspend(adev);
4517 	}
4518 
4519 	return ret == 0 ? count : 0;
4520 }
4521 
4522 DEVICE_ATTR_WO(s3_debug);
4523 
4524 #endif
4525 
4526 static int dm_early_init(void *handle)
4527 {
4528 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4529 
4530 	switch (adev->asic_type) {
4531 #if defined(CONFIG_DRM_AMD_DC_SI)
4532 	case CHIP_TAHITI:
4533 	case CHIP_PITCAIRN:
4534 	case CHIP_VERDE:
4535 		adev->mode_info.num_crtc = 6;
4536 		adev->mode_info.num_hpd = 6;
4537 		adev->mode_info.num_dig = 6;
4538 		break;
4539 	case CHIP_OLAND:
4540 		adev->mode_info.num_crtc = 2;
4541 		adev->mode_info.num_hpd = 2;
4542 		adev->mode_info.num_dig = 2;
4543 		break;
4544 #endif
4545 	case CHIP_BONAIRE:
4546 	case CHIP_HAWAII:
4547 		adev->mode_info.num_crtc = 6;
4548 		adev->mode_info.num_hpd = 6;
4549 		adev->mode_info.num_dig = 6;
4550 		break;
4551 	case CHIP_KAVERI:
4552 		adev->mode_info.num_crtc = 4;
4553 		adev->mode_info.num_hpd = 6;
4554 		adev->mode_info.num_dig = 7;
4555 		break;
4556 	case CHIP_KABINI:
4557 	case CHIP_MULLINS:
4558 		adev->mode_info.num_crtc = 2;
4559 		adev->mode_info.num_hpd = 6;
4560 		adev->mode_info.num_dig = 6;
4561 		break;
4562 	case CHIP_FIJI:
4563 	case CHIP_TONGA:
4564 		adev->mode_info.num_crtc = 6;
4565 		adev->mode_info.num_hpd = 6;
4566 		adev->mode_info.num_dig = 7;
4567 		break;
4568 	case CHIP_CARRIZO:
4569 		adev->mode_info.num_crtc = 3;
4570 		adev->mode_info.num_hpd = 6;
4571 		adev->mode_info.num_dig = 9;
4572 		break;
4573 	case CHIP_STONEY:
4574 		adev->mode_info.num_crtc = 2;
4575 		adev->mode_info.num_hpd = 6;
4576 		adev->mode_info.num_dig = 9;
4577 		break;
4578 	case CHIP_POLARIS11:
4579 	case CHIP_POLARIS12:
4580 		adev->mode_info.num_crtc = 5;
4581 		adev->mode_info.num_hpd = 5;
4582 		adev->mode_info.num_dig = 5;
4583 		break;
4584 	case CHIP_POLARIS10:
4585 	case CHIP_VEGAM:
4586 		adev->mode_info.num_crtc = 6;
4587 		adev->mode_info.num_hpd = 6;
4588 		adev->mode_info.num_dig = 6;
4589 		break;
4590 	case CHIP_VEGA10:
4591 	case CHIP_VEGA12:
4592 	case CHIP_VEGA20:
4593 		adev->mode_info.num_crtc = 6;
4594 		adev->mode_info.num_hpd = 6;
4595 		adev->mode_info.num_dig = 6;
4596 		break;
4597 	default:
4598 
4599 		switch (adev->ip_versions[DCE_HWIP][0]) {
4600 		case IP_VERSION(2, 0, 2):
4601 		case IP_VERSION(3, 0, 0):
4602 			adev->mode_info.num_crtc = 6;
4603 			adev->mode_info.num_hpd = 6;
4604 			adev->mode_info.num_dig = 6;
4605 			break;
4606 		case IP_VERSION(2, 0, 0):
4607 		case IP_VERSION(3, 0, 2):
4608 			adev->mode_info.num_crtc = 5;
4609 			adev->mode_info.num_hpd = 5;
4610 			adev->mode_info.num_dig = 5;
4611 			break;
4612 		case IP_VERSION(2, 0, 3):
4613 		case IP_VERSION(3, 0, 3):
4614 			adev->mode_info.num_crtc = 2;
4615 			adev->mode_info.num_hpd = 2;
4616 			adev->mode_info.num_dig = 2;
4617 			break;
4618 		case IP_VERSION(1, 0, 0):
4619 		case IP_VERSION(1, 0, 1):
4620 		case IP_VERSION(3, 0, 1):
4621 		case IP_VERSION(2, 1, 0):
4622 		case IP_VERSION(3, 1, 2):
4623 		case IP_VERSION(3, 1, 3):
4624 		case IP_VERSION(3, 1, 5):
4625 		case IP_VERSION(3, 1, 6):
4626 		case IP_VERSION(3, 2, 0):
4627 		case IP_VERSION(3, 2, 1):
4628 			adev->mode_info.num_crtc = 4;
4629 			adev->mode_info.num_hpd = 4;
4630 			adev->mode_info.num_dig = 4;
4631 			break;
4632 		default:
4633 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4634 					adev->ip_versions[DCE_HWIP][0]);
4635 			return -EINVAL;
4636 		}
4637 		break;
4638 	}
4639 
4640 	amdgpu_dm_set_irq_funcs(adev);
4641 
4642 	if (adev->mode_info.funcs == NULL)
4643 		adev->mode_info.funcs = &dm_display_funcs;
4644 
4645 	/*
4646 	 * Note: Do NOT change adev->audio_endpt_rreg and
4647 	 * adev->audio_endpt_wreg because they are initialised in
4648 	 * amdgpu_device_init()
4649 	 */
4650 #if defined(CONFIG_DEBUG_KERNEL_DC)
4651 	device_create_file(
4652 		adev_to_drm(adev)->dev,
4653 		&dev_attr_s3_debug);
4654 #endif
4655 
4656 	return 0;
4657 }
4658 
4659 static bool modeset_required(struct drm_crtc_state *crtc_state,
4660 			     struct dc_stream_state *new_stream,
4661 			     struct dc_stream_state *old_stream)
4662 {
4663 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4664 }
4665 
4666 static bool modereset_required(struct drm_crtc_state *crtc_state)
4667 {
4668 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4669 }
4670 
4671 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4672 {
4673 	drm_encoder_cleanup(encoder);
4674 	kfree(encoder);
4675 }
4676 
4677 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4678 	.destroy = amdgpu_dm_encoder_destroy,
4679 };
4680 
4681 
4682 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4683 					 struct drm_framebuffer *fb,
4684 					 int *min_downscale, int *max_upscale)
4685 {
4686 	struct amdgpu_device *adev = drm_to_adev(dev);
4687 	struct dc *dc = adev->dm.dc;
4688 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4689 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4690 
4691 	switch (fb->format->format) {
4692 	case DRM_FORMAT_P010:
4693 	case DRM_FORMAT_NV12:
4694 	case DRM_FORMAT_NV21:
4695 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4696 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4697 		break;
4698 
4699 	case DRM_FORMAT_XRGB16161616F:
4700 	case DRM_FORMAT_ARGB16161616F:
4701 	case DRM_FORMAT_XBGR16161616F:
4702 	case DRM_FORMAT_ABGR16161616F:
4703 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4704 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4705 		break;
4706 
4707 	default:
4708 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4709 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4710 		break;
4711 	}
4712 
4713 	/*
4714 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4715 	 * scaling factor of 1.0 == 1000 units.
4716 	 */
4717 	if (*max_upscale == 1)
4718 		*max_upscale = 1000;
4719 
4720 	if (*min_downscale == 1)
4721 		*min_downscale = 1000;
4722 }
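
/*
 * Editorial note on units: the scale factors above are in 1/1000 units, so a
 * min_downscale of 250 allows at most a 4:1 downscale and a max_upscale of
 * 16000 allows at most a 16x upscale (250 and 16000 are also the fallback
 * defaults used in fill_dc_scaling_info() when no framebuffer is attached).
 * For instance, shrinking a 3840-wide source to a 1920-wide destination gives
 * scale_w = 1920 * 1000 / 3840 = 500, which is accepted when
 * min_downscale <= 500.
 */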
4723 
4724 
4725 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4726 				const struct drm_plane_state *state,
4727 				struct dc_scaling_info *scaling_info)
4728 {
4729 	int scale_w, scale_h, min_downscale, max_upscale;
4730 
4731 	memset(scaling_info, 0, sizeof(*scaling_info));
4732 
4733 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4734 	scaling_info->src_rect.x = state->src_x >> 16;
4735 	scaling_info->src_rect.y = state->src_y >> 16;
4736 
4737 	/*
4738 	 * For reasons we don't (yet) fully understand a non-zero
4739 	 * src_y coordinate into an NV12 buffer can cause a
4740 	 * system hang on DCN1x.
4741 	 * To avoid hangs (and maybe be overly cautious)
4742 	 * let's reject both non-zero src_x and src_y.
4743 	 *
4744 	 * We currently know of only one use-case to reproduce a
4745 	 * scenario with non-zero src_x and src_y for NV12, which
4746 	 * is to gesture the YouTube Android app into full screen
4747 	 * on ChromeOS.
4748 	 */
4749 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4750 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4751 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4752 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4753 		return -EINVAL;
4754 
4755 	scaling_info->src_rect.width = state->src_w >> 16;
4756 	if (scaling_info->src_rect.width == 0)
4757 		return -EINVAL;
4758 
4759 	scaling_info->src_rect.height = state->src_h >> 16;
4760 	if (scaling_info->src_rect.height == 0)
4761 		return -EINVAL;
4762 
4763 	scaling_info->dst_rect.x = state->crtc_x;
4764 	scaling_info->dst_rect.y = state->crtc_y;
4765 
4766 	if (state->crtc_w == 0)
4767 		return -EINVAL;
4768 
4769 	scaling_info->dst_rect.width = state->crtc_w;
4770 
4771 	if (state->crtc_h == 0)
4772 		return -EINVAL;
4773 
4774 	scaling_info->dst_rect.height = state->crtc_h;
4775 
4776 	/* DRM doesn't specify clipping on destination output. */
4777 	scaling_info->clip_rect = scaling_info->dst_rect;
4778 
4779 	/* Validate scaling per-format with DC plane caps */
4780 	if (state->plane && state->plane->dev && state->fb) {
4781 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4782 					     &min_downscale, &max_upscale);
4783 	} else {
4784 		min_downscale = 250;
4785 		max_upscale = 16000;
4786 	}
4787 
4788 	scale_w = scaling_info->dst_rect.width * 1000 /
4789 		  scaling_info->src_rect.width;
4790 
4791 	if (scale_w < min_downscale || scale_w > max_upscale)
4792 		return -EINVAL;
4793 
4794 	scale_h = scaling_info->dst_rect.height * 1000 /
4795 		  scaling_info->src_rect.height;
4796 
4797 	if (scale_h < min_downscale || scale_h > max_upscale)
4798 		return -EINVAL;
4799 
4800 	/*
4801 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4802 	 * assume reasonable defaults based on the format.
4803 	 */
4804 
4805 	return 0;
4806 }
4807 
4808 static void
4809 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4810 				 uint64_t tiling_flags)
4811 {
4812 	/* Fill GFX8 params */
4813 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4814 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4815 
4816 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4817 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4818 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4819 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4820 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4821 
4822 		/* XXX fix me for VI */
4823 		tiling_info->gfx8.num_banks = num_banks;
4824 		tiling_info->gfx8.array_mode =
4825 				DC_ARRAY_2D_TILED_THIN1;
4826 		tiling_info->gfx8.tile_split = tile_split;
4827 		tiling_info->gfx8.bank_width = bankw;
4828 		tiling_info->gfx8.bank_height = bankh;
4829 		tiling_info->gfx8.tile_aspect = mtaspect;
4830 		tiling_info->gfx8.tile_mode =
4831 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4832 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4833 			== DC_ARRAY_1D_TILED_THIN1) {
4834 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4835 	}
4836 
4837 	tiling_info->gfx8.pipe_config =
4838 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4839 }
4840 
4841 static void
4842 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4843 				  union dc_tiling_info *tiling_info)
4844 {
4845 	tiling_info->gfx9.num_pipes =
4846 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4847 	tiling_info->gfx9.num_banks =
4848 		adev->gfx.config.gb_addr_config_fields.num_banks;
4849 	tiling_info->gfx9.pipe_interleave =
4850 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4851 	tiling_info->gfx9.num_shader_engines =
4852 		adev->gfx.config.gb_addr_config_fields.num_se;
4853 	tiling_info->gfx9.max_compressed_frags =
4854 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4855 	tiling_info->gfx9.num_rb_per_se =
4856 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4857 	tiling_info->gfx9.shaderEnable = 1;
4858 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4859 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4860 }
4861 
4862 static int
4863 validate_dcc(struct amdgpu_device *adev,
4864 	     const enum surface_pixel_format format,
4865 	     const enum dc_rotation_angle rotation,
4866 	     const union dc_tiling_info *tiling_info,
4867 	     const struct dc_plane_dcc_param *dcc,
4868 	     const struct dc_plane_address *address,
4869 	     const struct plane_size *plane_size)
4870 {
4871 	struct dc *dc = adev->dm.dc;
4872 	struct dc_dcc_surface_param input;
4873 	struct dc_surface_dcc_cap output;
4874 
4875 	memset(&input, 0, sizeof(input));
4876 	memset(&output, 0, sizeof(output));
4877 
4878 	if (!dcc->enable)
4879 		return 0;
4880 
4881 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4882 	    !dc->cap_funcs.get_dcc_compression_cap)
4883 		return -EINVAL;
4884 
4885 	input.format = format;
4886 	input.surface_size.width = plane_size->surface_size.width;
4887 	input.surface_size.height = plane_size->surface_size.height;
4888 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4889 
4890 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4891 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4892 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4893 		input.scan = SCAN_DIRECTION_VERTICAL;
4894 
4895 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4896 		return -EINVAL;
4897 
4898 	if (!output.capable)
4899 		return -EINVAL;
4900 
4901 	if (dcc->independent_64b_blks == 0 &&
4902 	    output.grph.rgb.independent_64b_blks != 0)
4903 		return -EINVAL;
4904 
4905 	return 0;
4906 }
4907 
4908 static bool
4909 modifier_has_dcc(uint64_t modifier)
4910 {
4911 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4912 }
4913 
4914 static unsigned
4915 modifier_gfx9_swizzle_mode(uint64_t modifier)
4916 {
4917 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4918 		return 0;
4919 
4920 	return AMD_FMT_MOD_GET(TILE, modifier);
4921 }
4922 
4923 static const struct drm_format_info *
4924 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4925 {
4926 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4927 }
4928 
4929 static void
4930 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4931 				    union dc_tiling_info *tiling_info,
4932 				    uint64_t modifier)
4933 {
4934 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4935 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4936 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4937 	unsigned int pipes_log2;
4938 
4939 	pipes_log2 = min(5u, mod_pipe_xor_bits);
4940 
4941 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4942 
4943 	if (!IS_AMD_FMT_MOD(modifier))
4944 		return;
4945 
4946 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4947 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4948 
4949 	if (adev->family >= AMDGPU_FAMILY_NV) {
4950 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4951 	} else {
4952 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4953 
4954 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4955 	}
4956 }
4957 
4958 enum dm_micro_swizzle {
4959 	MICRO_SWIZZLE_Z = 0,
4960 	MICRO_SWIZZLE_S = 1,
4961 	MICRO_SWIZZLE_D = 2,
4962 	MICRO_SWIZZLE_R = 3
4963 };
4964 
4965 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4966 					  uint32_t format,
4967 					  uint64_t modifier)
4968 {
4969 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4970 	const struct drm_format_info *info = drm_format_info(format);
4971 	int i;
4972 
4973 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4974 
4975 	if (!info)
4976 		return false;
4977 
4978 	/*
4979 	 * We always have to allow these modifiers:
4980 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4981 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4982 	 */
4983 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4984 	    modifier == DRM_FORMAT_MOD_INVALID) {
4985 		return true;
4986 	}
4987 
4988 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4989 	for (i = 0; i < plane->modifier_count; i++) {
4990 		if (modifier == plane->modifiers[i])
4991 			break;
4992 	}
4993 	if (i == plane->modifier_count)
4994 		return false;
4995 
4996 	/*
4997 	 * For D swizzle the canonical modifier depends on the bpp, so check
4998 	 * it here.
4999 	 */
5000 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
5001 	    adev->family >= AMDGPU_FAMILY_NV) {
5002 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
5003 			return false;
5004 	}
5005 
5006 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5007 	    info->cpp[0] < 8)
5008 		return false;
5009 
5010 	if (modifier_has_dcc(modifier)) {
5011 		/* Per radeonsi comments 16/64 bpp are more complicated. */
5012 		if (info->cpp[0] != 4)
5013 			return false;
5014 		/* We support multi-planar formats, but not when combined with
5015 		 * additional DCC metadata planes. */
5016 		if (info->num_planes > 1)
5017 			return false;
5018 	}
5019 
5020 	return true;
5021 }
5022 
5023 static void
5024 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5025 {
5026 	if (!*mods)
5027 		return;
5028 
5029 	if (*cap - *size < 1) {
5030 		uint64_t new_cap = *cap * 2;
5031 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5032 
5033 		if (!new_mods) {
5034 			kfree(*mods);
5035 			*mods = NULL;
5036 			return;
5037 		}
5038 
5039 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5040 		kfree(*mods);
5041 		*mods = new_mods;
5042 		*cap = new_cap;
5043 	}
5044 
5045 	(*mods)[*size] = mod;
5046 	*size += 1;
5047 }
5048 
5049 static void
5050 add_gfx9_modifiers(const struct amdgpu_device *adev,
5051 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5052 {
5053 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5054 	int pipe_xor_bits = min(8, pipes +
5055 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5056 	int bank_xor_bits = min(8 - pipe_xor_bits,
5057 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5058 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5059 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5060 
5061 
5062 	if (adev->family == AMDGPU_FAMILY_RV) {
5063 		/* Raven2 and later */
5064 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5065 
5066 		/*
5067 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5068 		 * doesn't support _D on DCN
5069 		 */
5070 
5071 		if (has_constant_encode) {
5072 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5073 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5074 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5075 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5076 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5077 				    AMD_FMT_MOD_SET(DCC, 1) |
5078 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5079 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5080 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5081 		}
5082 
5083 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5084 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5085 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5086 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5087 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5088 			    AMD_FMT_MOD_SET(DCC, 1) |
5089 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5090 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5091 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5092 
5093 		if (has_constant_encode) {
5094 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5095 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5096 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5097 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5098 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5099 				    AMD_FMT_MOD_SET(DCC, 1) |
5100 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5101 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5102 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5103 
5104 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5105 				    AMD_FMT_MOD_SET(RB, rb) |
5106 				    AMD_FMT_MOD_SET(PIPE, pipes));
5107 		}
5108 
5109 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5111 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5112 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5113 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5114 			    AMD_FMT_MOD_SET(DCC, 1) |
5115 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5116 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5117 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5118 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5119 			    AMD_FMT_MOD_SET(RB, rb) |
5120 			    AMD_FMT_MOD_SET(PIPE, pipes));
5121 	}
5122 
5123 	/*
5124 	 * Only supported for 64bpp on Raven, will be filtered on format in
5125 	 * dm_plane_format_mod_supported.
5126 	 */
5127 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5128 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5129 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5130 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5131 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5132 
5133 	if (adev->family == AMDGPU_FAMILY_RV) {
5134 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5135 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5136 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5137 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5138 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5139 	}
5140 
5141 	/*
5142 	 * Only supported for 64bpp on Raven, will be filtered on format in
5143 	 * dm_plane_format_mod_supported.
5144 	 */
5145 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5147 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5148 
5149 	if (adev->family == AMDGPU_FAMILY_RV) {
5150 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5151 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5152 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5153 	}
5154 }
5155 
5156 static void
5157 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5158 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5159 {
5160 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5161 
5162 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5163 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5164 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5165 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5166 		    AMD_FMT_MOD_SET(DCC, 1) |
5167 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5168 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5169 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5170 
5171 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5172 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5173 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5174 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5175 		    AMD_FMT_MOD_SET(DCC, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5178 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5179 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5180 
5181 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5182 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5183 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5184 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5185 
5186 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5187 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5188 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5189 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5190 
5191 
5192 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5193 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5194 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5195 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5196 
5197 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5198 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5199 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5200 }
5201 
5202 static void
5203 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5204 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5205 {
5206 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5207 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5208 
5209 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5210 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5211 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5212 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5213 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5214 		    AMD_FMT_MOD_SET(DCC, 1) |
5215 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5216 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5217 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5218 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5219 
5220 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5221 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5222 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5223 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5224 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5225 		    AMD_FMT_MOD_SET(DCC, 1) |
5226 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5227 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5228 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5229 
5230 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5231 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5232 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5233 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5234 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5235 		    AMD_FMT_MOD_SET(DCC, 1) |
5236 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5237 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5238 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5239 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5240 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5241 
5242 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5243 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5244 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5245 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5246 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5247 		    AMD_FMT_MOD_SET(DCC, 1) |
5248 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5249 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5250 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5251 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5252 
5253 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5254 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5255 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5256 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5257 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5258 
5259 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5260 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5261 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5262 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5263 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5264 
5265 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5266 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5267 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5268 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5269 
5270 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5271 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5272 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5273 }
5274 
5275 static void
5276 add_gfx11_modifiers(struct amdgpu_device *adev,
5277 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5278 {
5279 	int num_pipes = 0;
5280 	int pipe_xor_bits = 0;
5281 	int num_pkrs = 0;
5282 	int pkrs = 0;
5283 	u32 gb_addr_config;
5284 	u8 i = 0;
5285 	unsigned swizzle_r_x;
5286 	uint64_t modifier_r_x;
5287 	uint64_t modifier_dcc_best;
5288 	uint64_t modifier_dcc_4k;
5289 
5290 	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
5291 	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5292 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5293 	ASSERT(gb_addr_config != 0);
5294 
5295 	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5296 	pkrs = ilog2(num_pkrs);
5297 	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5298 	pipe_xor_bits = ilog2(num_pipes);
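	/*
	 * Illustrative example (hypothetical register value): if the NUM_PKRS
	 * field reads 2 and NUM_PIPES reads 4, then num_pkrs = 1 << 2 = 4,
	 * pkrs = 2, num_pipes = 1 << 4 = 16 and pipe_xor_bits = 4.
	 */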
5299 
5300 	for (i = 0; i < 2; i++) {
5301 		/* Insert the best one first. */
5302 		/* R_X swizzle modes are the best for rendering and DCC requires them. */
5303 		if (num_pipes > 16)
5304 			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5305 		else
5306 			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5307 
5308 		modifier_r_x = AMD_FMT_MOD |
5309 			       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5310 			       AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5311 			       AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5312 			       AMD_FMT_MOD_SET(PACKERS, pkrs);
5313 
5314 		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5315 		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5316 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5317 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5318 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5319 
5320 		/* DCC settings for 4K and greater resolutions. (required by display hw) */
5321 		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5322 				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5323 				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5324 				  AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5325 
5326 		add_modifier(mods, size, capacity, modifier_dcc_best);
5327 		add_modifier(mods, size, capacity, modifier_dcc_4k);
5328 
5329 		add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5330 		add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5331 
5332 		add_modifier(mods, size, capacity, modifier_r_x);
5333 	}
5334 
5335 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5336 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5337 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5338 }
5339 
5340 static int
5341 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5342 {
5343 	uint64_t size = 0, capacity = 128;
5344 	*mods = NULL;
5345 
5346 	/* We have not hooked up any pre-GFX9 modifiers. */
5347 	if (adev->family < AMDGPU_FAMILY_AI)
5348 		return 0;
5349 
5350 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5351 
5352 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5353 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5354 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5355 		return *mods ? 0 : -ENOMEM;
5356 	}
5357 
5358 	switch (adev->family) {
5359 	case AMDGPU_FAMILY_AI:
5360 	case AMDGPU_FAMILY_RV:
5361 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5362 		break;
5363 	case AMDGPU_FAMILY_NV:
5364 	case AMDGPU_FAMILY_VGH:
5365 	case AMDGPU_FAMILY_YC:
5366 	case AMDGPU_FAMILY_GC_10_3_6:
5367 	case AMDGPU_FAMILY_GC_10_3_7:
5368 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5369 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5370 		else
5371 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5372 		break;
5373 	case AMDGPU_FAMILY_GC_11_0_0:
5374 		add_gfx11_modifiers(adev, mods, &size, &capacity);
5375 		break;
5376 	}
5377 
5378 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5379 
5380 	/* INVALID marks the end of the list. */
5381 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5382 
5383 	if (!*mods)
5384 		return -ENOMEM;
5385 
5386 	return 0;
5387 }
5388 
5389 static int
5390 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5391 					  const struct amdgpu_framebuffer *afb,
5392 					  const enum surface_pixel_format format,
5393 					  const enum dc_rotation_angle rotation,
5394 					  const struct plane_size *plane_size,
5395 					  union dc_tiling_info *tiling_info,
5396 					  struct dc_plane_dcc_param *dcc,
5397 					  struct dc_plane_address *address,
5398 					  const bool force_disable_dcc)
5399 {
5400 	const uint64_t modifier = afb->base.modifier;
5401 	int ret = 0;
5402 
5403 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5404 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5405 
5406 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5407 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5408 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5409 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5410 
5411 		dcc->enable = 1;
5412 		dcc->meta_pitch = afb->base.pitches[1];
5413 		dcc->independent_64b_blks = independent_64b_blks;
5414 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5415 			if (independent_64b_blks && independent_128b_blks)
5416 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5417 			else if (independent_128b_blks)
5418 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5419 			else if (independent_64b_blks && !independent_128b_blks)
5420 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5421 			else
5422 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5423 		} else {
5424 			if (independent_64b_blks)
5425 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5426 			else
5427 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5428 		}
5429 
5430 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5431 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5432 	}
5433 
5434 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5435 	if (ret)
5436 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5437 
5438 	return ret;
5439 }
5440 
5441 static int
5442 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5443 			     const struct amdgpu_framebuffer *afb,
5444 			     const enum surface_pixel_format format,
5445 			     const enum dc_rotation_angle rotation,
5446 			     const uint64_t tiling_flags,
5447 			     union dc_tiling_info *tiling_info,
5448 			     struct plane_size *plane_size,
5449 			     struct dc_plane_dcc_param *dcc,
5450 			     struct dc_plane_address *address,
5451 			     bool tmz_surface,
5452 			     bool force_disable_dcc)
5453 {
5454 	const struct drm_framebuffer *fb = &afb->base;
5455 	int ret;
5456 
5457 	memset(tiling_info, 0, sizeof(*tiling_info));
5458 	memset(plane_size, 0, sizeof(*plane_size));
5459 	memset(dcc, 0, sizeof(*dcc));
5460 	memset(address, 0, sizeof(*address));
5461 
5462 	address->tmz_surface = tmz_surface;
5463 
5464 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5465 		uint64_t addr = afb->address + fb->offsets[0];
5466 
5467 		plane_size->surface_size.x = 0;
5468 		plane_size->surface_size.y = 0;
5469 		plane_size->surface_size.width = fb->width;
5470 		plane_size->surface_size.height = fb->height;
5471 		plane_size->surface_pitch =
5472 			fb->pitches[0] / fb->format->cpp[0];
5473 
5474 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5475 		address->grph.addr.low_part = lower_32_bits(addr);
5476 		address->grph.addr.high_part = upper_32_bits(addr);
5477 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5478 		uint64_t luma_addr = afb->address + fb->offsets[0];
5479 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5480 
5481 		plane_size->surface_size.x = 0;
5482 		plane_size->surface_size.y = 0;
5483 		plane_size->surface_size.width = fb->width;
5484 		plane_size->surface_size.height = fb->height;
5485 		plane_size->surface_pitch =
5486 			fb->pitches[0] / fb->format->cpp[0];
5487 
5488 		plane_size->chroma_size.x = 0;
5489 		plane_size->chroma_size.y = 0;
5490 		/* TODO: set these based on surface format */
5491 		plane_size->chroma_size.width = fb->width / 2;
5492 		plane_size->chroma_size.height = fb->height / 2;
5493 
5494 		plane_size->chroma_pitch =
5495 			fb->pitches[1] / fb->format->cpp[1];
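		/*
		 * Example (hypothetical NV12 framebuffer): a 1920x1080 surface
		 * yields a 960x540 chroma plane; with pitches[1] == 1920 and
		 * cpp[1] == 2 the chroma_pitch works out to 960 pixels.
		 */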
5496 
5497 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5498 		address->video_progressive.luma_addr.low_part =
5499 			lower_32_bits(luma_addr);
5500 		address->video_progressive.luma_addr.high_part =
5501 			upper_32_bits(luma_addr);
5502 		address->video_progressive.chroma_addr.low_part =
5503 			lower_32_bits(chroma_addr);
5504 		address->video_progressive.chroma_addr.high_part =
5505 			upper_32_bits(chroma_addr);
5506 	}
5507 
5508 	if (adev->family >= AMDGPU_FAMILY_AI) {
5509 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5510 								rotation, plane_size,
5511 								tiling_info, dcc,
5512 								address,
5513 								force_disable_dcc);
5514 		if (ret)
5515 			return ret;
5516 	} else {
5517 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5518 	}
5519 
5520 	return 0;
5521 }
5522 
5523 static void
5524 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5525 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5526 			       bool *global_alpha, int *global_alpha_value)
5527 {
5528 	*per_pixel_alpha = false;
5529 	*pre_multiplied_alpha = true;
5530 	*global_alpha = false;
5531 	*global_alpha_value = 0xff;
5532 
5533 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5534 		return;
5535 
5536 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5537 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5538 		static const uint32_t alpha_formats[] = {
5539 			DRM_FORMAT_ARGB8888,
5540 			DRM_FORMAT_RGBA8888,
5541 			DRM_FORMAT_ABGR8888,
5542 		};
5543 		uint32_t format = plane_state->fb->format->format;
5544 		unsigned int i;
5545 
5546 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5547 			if (format == alpha_formats[i]) {
5548 				*per_pixel_alpha = true;
5549 				break;
5550 			}
5551 		}
5552 
5553 		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5554 			*pre_multiplied_alpha = false;
5555 	}
5556 
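	/*
	 * plane_state->alpha is a 16-bit value while DC expects an 8-bit
	 * global alpha. Example: a DRM alpha of 0x8000 maps to a
	 * global_alpha_value of 0x80 (roughly 50% opacity).
	 */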
5557 	if (plane_state->alpha < 0xffff) {
5558 		*global_alpha = true;
5559 		*global_alpha_value = plane_state->alpha >> 8;
5560 	}
5561 }
5562 
5563 static int
5564 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5565 			    const enum surface_pixel_format format,
5566 			    enum dc_color_space *color_space)
5567 {
5568 	bool full_range;
5569 
5570 	*color_space = COLOR_SPACE_SRGB;
5571 
5572 	/* DRM color properties only affect non-RGB formats. */
5573 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5574 		return 0;
5575 
5576 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5577 
5578 	switch (plane_state->color_encoding) {
5579 	case DRM_COLOR_YCBCR_BT601:
5580 		if (full_range)
5581 			*color_space = COLOR_SPACE_YCBCR601;
5582 		else
5583 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5584 		break;
5585 
5586 	case DRM_COLOR_YCBCR_BT709:
5587 		if (full_range)
5588 			*color_space = COLOR_SPACE_YCBCR709;
5589 		else
5590 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5591 		break;
5592 
5593 	case DRM_COLOR_YCBCR_BT2020:
5594 		if (full_range)
5595 			*color_space = COLOR_SPACE_2020_YCBCR;
5596 		else
5597 			return -EINVAL;
5598 		break;
5599 
5600 	default:
5601 		return -EINVAL;
5602 	}
5603 
5604 	return 0;
5605 }
5606 
5607 static int
5608 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5609 			    const struct drm_plane_state *plane_state,
5610 			    const uint64_t tiling_flags,
5611 			    struct dc_plane_info *plane_info,
5612 			    struct dc_plane_address *address,
5613 			    bool tmz_surface,
5614 			    bool force_disable_dcc)
5615 {
5616 	const struct drm_framebuffer *fb = plane_state->fb;
5617 	const struct amdgpu_framebuffer *afb =
5618 		to_amdgpu_framebuffer(plane_state->fb);
5619 	int ret;
5620 
5621 	memset(plane_info, 0, sizeof(*plane_info));
5622 
5623 	switch (fb->format->format) {
5624 	case DRM_FORMAT_C8:
5625 		plane_info->format =
5626 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5627 		break;
5628 	case DRM_FORMAT_RGB565:
5629 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5630 		break;
5631 	case DRM_FORMAT_XRGB8888:
5632 	case DRM_FORMAT_ARGB8888:
5633 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5634 		break;
5635 	case DRM_FORMAT_XRGB2101010:
5636 	case DRM_FORMAT_ARGB2101010:
5637 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5638 		break;
5639 	case DRM_FORMAT_XBGR2101010:
5640 	case DRM_FORMAT_ABGR2101010:
5641 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5642 		break;
5643 	case DRM_FORMAT_XBGR8888:
5644 	case DRM_FORMAT_ABGR8888:
5645 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5646 		break;
5647 	case DRM_FORMAT_NV21:
5648 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5649 		break;
5650 	case DRM_FORMAT_NV12:
5651 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5652 		break;
5653 	case DRM_FORMAT_P010:
5654 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5655 		break;
5656 	case DRM_FORMAT_XRGB16161616F:
5657 	case DRM_FORMAT_ARGB16161616F:
5658 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5659 		break;
5660 	case DRM_FORMAT_XBGR16161616F:
5661 	case DRM_FORMAT_ABGR16161616F:
5662 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5663 		break;
5664 	case DRM_FORMAT_XRGB16161616:
5665 	case DRM_FORMAT_ARGB16161616:
5666 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5667 		break;
5668 	case DRM_FORMAT_XBGR16161616:
5669 	case DRM_FORMAT_ABGR16161616:
5670 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5671 		break;
5672 	default:
5673 		DRM_ERROR(
5674 			"Unsupported screen format %p4cc\n",
5675 			&fb->format->format);
5676 		return -EINVAL;
5677 	}
5678 
5679 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5680 	case DRM_MODE_ROTATE_0:
5681 		plane_info->rotation = ROTATION_ANGLE_0;
5682 		break;
5683 	case DRM_MODE_ROTATE_90:
5684 		plane_info->rotation = ROTATION_ANGLE_90;
5685 		break;
5686 	case DRM_MODE_ROTATE_180:
5687 		plane_info->rotation = ROTATION_ANGLE_180;
5688 		break;
5689 	case DRM_MODE_ROTATE_270:
5690 		plane_info->rotation = ROTATION_ANGLE_270;
5691 		break;
5692 	default:
5693 		plane_info->rotation = ROTATION_ANGLE_0;
5694 		break;
5695 	}
5696 
5697 	plane_info->visible = true;
5698 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5699 
5700 	plane_info->layer_index = 0;
5701 
5702 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5703 					  &plane_info->color_space);
5704 	if (ret)
5705 		return ret;
5706 
5707 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5708 					   plane_info->rotation, tiling_flags,
5709 					   &plane_info->tiling_info,
5710 					   &plane_info->plane_size,
5711 					   &plane_info->dcc, address, tmz_surface,
5712 					   force_disable_dcc);
5713 	if (ret)
5714 		return ret;
5715 
5716 	fill_blending_from_plane_state(
5717 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5718 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5719 
5720 	return 0;
5721 }
5722 
5723 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5724 				    struct dc_plane_state *dc_plane_state,
5725 				    struct drm_plane_state *plane_state,
5726 				    struct drm_crtc_state *crtc_state)
5727 {
5728 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5729 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5730 	struct dc_scaling_info scaling_info;
5731 	struct dc_plane_info plane_info;
5732 	int ret;
5733 	bool force_disable_dcc = false;
5734 
5735 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5736 	if (ret)
5737 		return ret;
5738 
5739 	dc_plane_state->src_rect = scaling_info.src_rect;
5740 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5741 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5742 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5743 
5744 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5745 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5746 					  afb->tiling_flags,
5747 					  &plane_info,
5748 					  &dc_plane_state->address,
5749 					  afb->tmz_surface,
5750 					  force_disable_dcc);
5751 	if (ret)
5752 		return ret;
5753 
5754 	dc_plane_state->format = plane_info.format;
5755 	dc_plane_state->color_space = plane_info.color_space;
5757 	dc_plane_state->plane_size = plane_info.plane_size;
5758 	dc_plane_state->rotation = plane_info.rotation;
5759 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5760 	dc_plane_state->stereo_format = plane_info.stereo_format;
5761 	dc_plane_state->tiling_info = plane_info.tiling_info;
5762 	dc_plane_state->visible = plane_info.visible;
5763 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5764 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5765 	dc_plane_state->global_alpha = plane_info.global_alpha;
5766 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5767 	dc_plane_state->dcc = plane_info.dcc;
5768 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5769 	dc_plane_state->flip_int_enabled = true;
5770 
5771 	/*
5772 	 * Always set input transfer function, since plane state is refreshed
5773 	 * every time.
5774 	 */
5775 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5776 	if (ret)
5777 		return ret;
5778 
5779 	return 0;
5780 }
5781 
5782 /**
5783  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5784  *
5785  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5786  *         remote fb
5787  * @old_plane_state: Old state of @plane
5788  * @new_plane_state: New state of @plane
5789  * @crtc_state: New state of CRTC connected to the @plane
5790  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5791  *
5792  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5793  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5794  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5795  * amdgpu_dm's.
5796  *
5797  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5798  * plane with regions that require flushing to the eDP remote buffer. In
5799  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5800  * implicitly provide damage clips without any client support via the plane
5801  * bounds.
5802  *
5803  * Today, amdgpu_dm only supports the MPO and cursor use cases.
5804  *
5805  * TODO: Also enable for FB_DAMAGE_CLIPS
5806  */
5807 static void fill_dc_dirty_rects(struct drm_plane *plane,
5808 				struct drm_plane_state *old_plane_state,
5809 				struct drm_plane_state *new_plane_state,
5810 				struct drm_crtc_state *crtc_state,
5811 				struct dc_flip_addrs *flip_addrs)
5812 {
5813 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5814 	struct rect *dirty_rects = flip_addrs->dirty_rects;
5815 	uint32_t num_clips;
5816 	bool bb_changed;
5817 	bool fb_changed;
5818 	uint32_t i = 0;
5819 
5820 	flip_addrs->dirty_rect_count = 0;
5821 
5822 	/*
5823 	 * Cursor plane has its own dirty rect update interface. See
5824 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5825 	 */
5826 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
5827 		return;
5828 
5829 	/*
5830 	 * Today, we only consider the MPO use case for PSR SU. If MPO is not
5831 	 * requested and there is a plane update, do a full-frame update (FFU).
5832 	 */
5833 	if (!dm_crtc_state->mpo_requested) {
5834 		dirty_rects[0].x = 0;
5835 		dirty_rects[0].y = 0;
5836 		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5837 		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5838 		flip_addrs->dirty_rect_count = 1;
5839 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5840 				 new_plane_state->plane->base.id,
5841 				 dm_crtc_state->base.mode.crtc_hdisplay,
5842 				 dm_crtc_state->base.mode.crtc_vdisplay);
5843 		return;
5844 	}
5845 
5846 	/*
5847 	 * MPO is requested. Add entire plane bounding box to dirty rects if
5848 	 * flipped to or damaged.
5849 	 *
5850 	 * If plane is moved or resized, also add old bounding box to dirty
5851 	 * rects.
5852 	 */
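	/*
	 * Illustrative example (hypothetical plane move): an MPO plane moved
	 * from (0, 0) to (100, 100) with an unchanged 1280x720 size produces
	 * two dirty rects, one for the new bounds and one for the old bounds,
	 * so dirty_rect_count ends up as 2.
	 */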
5853 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5854 	fb_changed = old_plane_state->fb->base.id !=
5855 		     new_plane_state->fb->base.id;
5856 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5857 		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
5858 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
5859 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
5860 
5861 	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5862 			 new_plane_state->plane->base.id,
5863 			 bb_changed, fb_changed, num_clips);
5864 
5865 	if (num_clips || fb_changed || bb_changed) {
5866 		dirty_rects[i].x = new_plane_state->crtc_x;
5867 		dirty_rects[i].y = new_plane_state->crtc_y;
5868 		dirty_rects[i].width = new_plane_state->crtc_w;
5869 		dirty_rects[i].height = new_plane_state->crtc_h;
5870 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5871 				 new_plane_state->plane->base.id,
5872 				 dirty_rects[i].x, dirty_rects[i].y,
5873 				 dirty_rects[i].width, dirty_rects[i].height);
5874 		i += 1;
5875 	}
5876 
5877 	/* Add old plane bounding-box if plane is moved or resized */
5878 	if (bb_changed) {
5879 		dirty_rects[i].x = old_plane_state->crtc_x;
5880 		dirty_rects[i].y = old_plane_state->crtc_y;
5881 		dirty_rects[i].width = old_plane_state->crtc_w;
5882 		dirty_rects[i].height = old_plane_state->crtc_h;
5883 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5884 				old_plane_state->plane->base.id,
5885 				dirty_rects[i].x, dirty_rects[i].y,
5886 				dirty_rects[i].width, dirty_rects[i].height);
5887 		i += 1;
5888 	}
5889 
5890 	flip_addrs->dirty_rect_count = i;
5891 }
5892 
5893 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5894 					   const struct dm_connector_state *dm_state,
5895 					   struct dc_stream_state *stream)
5896 {
5897 	enum amdgpu_rmx_type rmx_type;
5898 
5899 	struct rect src = { 0 }; /* viewport in composition space */
5900 	struct rect dst = { 0 }; /* stream addressable area */
5901 
5902 	/* no mode. nothing to be done */
5903 	if (!mode)
5904 		return;
5905 
5906 	/* Full screen scaling by default */
5907 	src.width = mode->hdisplay;
5908 	src.height = mode->vdisplay;
5909 	dst.width = stream->timing.h_addressable;
5910 	dst.height = stream->timing.v_addressable;
5911 
5912 	if (dm_state) {
5913 		rmx_type = dm_state->scaling;
5914 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5915 			if (src.width * dst.height <
5916 					src.height * dst.width) {
5917 				/* height needs less upscaling/more downscaling */
5918 				dst.width = src.width *
5919 						dst.height / src.height;
5920 			} else {
5921 				/* width needs less upscaling/more downscaling */
5922 				dst.height = src.height *
5923 						dst.width / src.width;
5924 			}
5925 		} else if (rmx_type == RMX_CENTER) {
5926 			dst = src;
5927 		}
5928 
5929 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5930 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
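		/*
		 * Worked example (hypothetical sizes): scaling a 1920x1080
		 * source onto a 1280x1024 panel with RMX_ASPECT keeps the
		 * width and reduces dst.height to 1080 * 1280 / 1920 = 720,
		 * then centers it with dst.y = (1024 - 720) / 2 = 152.
		 */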
5931 
5932 		if (dm_state->underscan_enable) {
5933 			dst.x += dm_state->underscan_hborder / 2;
5934 			dst.y += dm_state->underscan_vborder / 2;
5935 			dst.width -= dm_state->underscan_hborder;
5936 			dst.height -= dm_state->underscan_vborder;
5937 		}
5938 	}
5939 
5940 	stream->src = src;
5941 	stream->dst = dst;
5942 
5943 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5944 		      dst.x, dst.y, dst.width, dst.height);
5945 
5946 }
5947 
5948 static enum dc_color_depth
5949 convert_color_depth_from_display_info(const struct drm_connector *connector,
5950 				      bool is_y420, int requested_bpc)
5951 {
5952 	uint8_t bpc;
5953 
5954 	if (is_y420) {
5955 		bpc = 8;
5956 
5957 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5958 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5959 			bpc = 16;
5960 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5961 			bpc = 12;
5962 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5963 			bpc = 10;
5964 	} else {
5965 		bpc = (uint8_t)connector->display_info.bpc;
5966 		/* Assume 8 bpc by default if no bpc is specified. */
5967 		bpc = bpc ? bpc : 8;
5968 	}
5969 
5970 	if (requested_bpc > 0) {
5971 		/*
5972 		 * Cap display bpc based on the user requested value.
5973 		 *
5974 		 * The value for state->max_bpc may not be correctly updated
5975 		 * depending on when the connector gets added to the state
5976 		 * or if this was called outside of atomic check, so it
5977 		 * can't be used directly.
5978 		 */
5979 		bpc = min_t(u8, bpc, requested_bpc);
5980 
5981 		/* Round down to the nearest even number. */
5982 		bpc = bpc - (bpc & 1);
5983 	}
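	/*
	 * Example (hypothetical request): a panel reporting 12 bpc with a
	 * requested_bpc of 10 is capped to min(12, 10) = 10, which is already
	 * even, so COLOR_DEPTH_101010 is returned below.
	 */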
5984 
5985 	switch (bpc) {
5986 	case 0:
5987 		/*
5988 		 * Temporary Work around, DRM doesn't parse color depth for
5989 		 * EDID revision before 1.4
5990 		 * TODO: Fix edid parsing
5991 		 */
5992 		return COLOR_DEPTH_888;
5993 	case 6:
5994 		return COLOR_DEPTH_666;
5995 	case 8:
5996 		return COLOR_DEPTH_888;
5997 	case 10:
5998 		return COLOR_DEPTH_101010;
5999 	case 12:
6000 		return COLOR_DEPTH_121212;
6001 	case 14:
6002 		return COLOR_DEPTH_141414;
6003 	case 16:
6004 		return COLOR_DEPTH_161616;
6005 	default:
6006 		return COLOR_DEPTH_UNDEFINED;
6007 	}
6008 }
6009 
6010 static enum dc_aspect_ratio
6011 get_aspect_ratio(const struct drm_display_mode *mode_in)
6012 {
6013 	/* 1-1 mapping, since both enums follow the HDMI spec. */
6014 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
6015 }
6016 
6017 static enum dc_color_space
6018 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
6019 {
6020 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
6021 
6022 	switch (dc_crtc_timing->pixel_encoding) {
6023 	case PIXEL_ENCODING_YCBCR422:
6024 	case PIXEL_ENCODING_YCBCR444:
6025 	case PIXEL_ENCODING_YCBCR420:
6026 	{
6027 		/*
6028 		 * 27030 kHz is the separation point between HDTV and SDTV
6029 		 * according to the HDMI spec, so we use YCbCr709 and YCbCr601,
6030 		 * respectively.
6031 		 */
6032 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
6033 			if (dc_crtc_timing->flags.Y_ONLY)
6034 				color_space =
6035 					COLOR_SPACE_YCBCR709_LIMITED;
6036 			else
6037 				color_space = COLOR_SPACE_YCBCR709;
6038 		} else {
6039 			if (dc_crtc_timing->flags.Y_ONLY)
6040 				color_space =
6041 					COLOR_SPACE_YCBCR601_LIMITED;
6042 			else
6043 				color_space = COLOR_SPACE_YCBCR601;
6044 		}
6045 
6046 	}
6047 	break;
6048 	case PIXEL_ENCODING_RGB:
6049 		color_space = COLOR_SPACE_SRGB;
6050 		break;
6051 
6052 	default:
6053 		WARN_ON(1);
6054 		break;
6055 	}
6056 
6057 	return color_space;
6058 }
6059 
6060 static bool adjust_colour_depth_from_display_info(
6061 	struct dc_crtc_timing *timing_out,
6062 	const struct drm_display_info *info)
6063 {
6064 	enum dc_color_depth depth = timing_out->display_color_depth;
6065 	int normalized_clk;
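
	/*
	 * Walk down from the current depth until the clock fits the sink's
	 * TMDS limit. Illustrative example (hypothetical timing): a 297 MHz
	 * 4:2:0 mode at 10 bpc normalizes to 297000 / 2 * 30 / 24 = 185625 kHz,
	 * which fits a 340000 kHz max_tmds_clock, so COLOR_DEPTH_101010 is kept.
	 */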
6066 	do {
6067 		normalized_clk = timing_out->pix_clk_100hz / 10;
6068 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6069 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6070 			normalized_clk /= 2;
6071 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
6072 		switch (depth) {
6073 		case COLOR_DEPTH_888:
6074 			break;
6075 		case COLOR_DEPTH_101010:
6076 			normalized_clk = (normalized_clk * 30) / 24;
6077 			break;
6078 		case COLOR_DEPTH_121212:
6079 			normalized_clk = (normalized_clk * 36) / 24;
6080 			break;
6081 		case COLOR_DEPTH_161616:
6082 			normalized_clk = (normalized_clk * 48) / 24;
6083 			break;
6084 		default:
6085 			/* The above depths are the only ones valid for HDMI. */
6086 			return false;
6087 		}
6088 		if (normalized_clk <= info->max_tmds_clock) {
6089 			timing_out->display_color_depth = depth;
6090 			return true;
6091 		}
6092 	} while (--depth > COLOR_DEPTH_666);
6093 	return false;
6094 }
6095 
6096 static void fill_stream_properties_from_drm_display_mode(
6097 	struct dc_stream_state *stream,
6098 	const struct drm_display_mode *mode_in,
6099 	const struct drm_connector *connector,
6100 	const struct drm_connector_state *connector_state,
6101 	const struct dc_stream_state *old_stream,
6102 	int requested_bpc)
6103 {
6104 	struct dc_crtc_timing *timing_out = &stream->timing;
6105 	const struct drm_display_info *info = &connector->display_info;
6106 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6107 	struct hdmi_vendor_infoframe hv_frame;
6108 	struct hdmi_avi_infoframe avi_frame;
6109 
6110 	memset(&hv_frame, 0, sizeof(hv_frame));
6111 	memset(&avi_frame, 0, sizeof(avi_frame));
6112 
6113 	timing_out->h_border_left = 0;
6114 	timing_out->h_border_right = 0;
6115 	timing_out->v_border_top = 0;
6116 	timing_out->v_border_bottom = 0;
6117 	/* TODO: un-hardcode */
6118 	if (drm_mode_is_420_only(info, mode_in)
6119 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6120 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6121 	else if (drm_mode_is_420_also(info, mode_in)
6122 			&& aconnector->force_yuv420_output)
6123 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6124 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6125 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6126 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6127 	else
6128 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6129 
6130 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6131 	timing_out->display_color_depth = convert_color_depth_from_display_info(
6132 		connector,
6133 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6134 		requested_bpc);
6135 	timing_out->scan_type = SCANNING_TYPE_NODATA;
6136 	timing_out->hdmi_vic = 0;
6137 
6138 	if (old_stream) {
6139 		timing_out->vic = old_stream->timing.vic;
6140 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6141 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6142 	} else {
6143 		timing_out->vic = drm_match_cea_mode(mode_in);
6144 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6145 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6146 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6147 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6148 	}
6149 
6150 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6151 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6152 		timing_out->vic = avi_frame.video_code;
6153 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6154 		timing_out->hdmi_vic = hv_frame.vic;
6155 	}
6156 
6157 	if (is_freesync_video_mode(mode_in, aconnector)) {
6158 		timing_out->h_addressable = mode_in->hdisplay;
6159 		timing_out->h_total = mode_in->htotal;
6160 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6161 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6162 		timing_out->v_total = mode_in->vtotal;
6163 		timing_out->v_addressable = mode_in->vdisplay;
6164 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6165 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6166 		timing_out->pix_clk_100hz = mode_in->clock * 10;
6167 	} else {
6168 		timing_out->h_addressable = mode_in->crtc_hdisplay;
6169 		timing_out->h_total = mode_in->crtc_htotal;
6170 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6171 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6172 		timing_out->v_total = mode_in->crtc_vtotal;
6173 		timing_out->v_addressable = mode_in->crtc_vdisplay;
6174 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6175 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6176 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6177 	}
6178 
6179 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6180 
6181 	stream->output_color_space = get_output_color_space(timing_out);
6182 
6183 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6184 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6185 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6186 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6187 		    drm_mode_is_420_also(info, mode_in) &&
6188 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6189 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6190 			adjust_colour_depth_from_display_info(timing_out, info);
6191 		}
6192 	}
6193 }
6194 
6195 static void fill_audio_info(struct audio_info *audio_info,
6196 			    const struct drm_connector *drm_connector,
6197 			    const struct dc_sink *dc_sink)
6198 {
6199 	int i = 0;
6200 	int cea_revision = 0;
6201 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6202 
6203 	audio_info->manufacture_id = edid_caps->manufacturer_id;
6204 	audio_info->product_id = edid_caps->product_id;
6205 
6206 	cea_revision = drm_connector->display_info.cea_rev;
6207 
6208 	strscpy(audio_info->display_name,
6209 		edid_caps->display_name,
6210 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6211 
6212 	if (cea_revision >= 3) {
6213 		audio_info->mode_count = edid_caps->audio_mode_count;
6214 
6215 		for (i = 0; i < audio_info->mode_count; ++i) {
6216 			audio_info->modes[i].format_code =
6217 					(enum audio_format_code)
6218 					(edid_caps->audio_modes[i].format_code);
6219 			audio_info->modes[i].channel_count =
6220 					edid_caps->audio_modes[i].channel_count;
6221 			audio_info->modes[i].sample_rates.all =
6222 					edid_caps->audio_modes[i].sample_rate;
6223 			audio_info->modes[i].sample_size =
6224 					edid_caps->audio_modes[i].sample_size;
6225 		}
6226 	}
6227 
6228 	audio_info->flags.all = edid_caps->speaker_flags;
6229 
6230 	/* TODO: We only check for the progressive mode, check for interlace mode too */
6231 	if (drm_connector->latency_present[0]) {
6232 		audio_info->video_latency = drm_connector->video_latency[0];
6233 		audio_info->audio_latency = drm_connector->audio_latency[0];
6234 	}
6235 
6236 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6237 
6238 }
6239 
6240 static void
6241 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6242 				      struct drm_display_mode *dst_mode)
6243 {
6244 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6245 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6246 	dst_mode->crtc_clock = src_mode->crtc_clock;
6247 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6248 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6249 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6250 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6251 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6252 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6253 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6254 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6255 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6256 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6257 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6258 }
6259 
6260 static void
6261 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6262 					const struct drm_display_mode *native_mode,
6263 					bool scale_enabled)
6264 {
6265 	if (scale_enabled) {
6266 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6267 	} else if (native_mode->clock == drm_mode->clock &&
6268 			native_mode->htotal == drm_mode->htotal &&
6269 			native_mode->vtotal == drm_mode->vtotal) {
6270 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6271 	} else {
6272 		/* neither scaling nor an amdgpu-inserted mode, no need to patch */
6273 	}
6274 }
6275 
6276 static struct dc_sink *
6277 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6278 {
6279 	struct dc_sink_init_data sink_init_data = { 0 };
6280 	struct dc_sink *sink = NULL;
6281 	sink_init_data.link = aconnector->dc_link;
6282 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6283 
6284 	sink = dc_sink_create(&sink_init_data);
6285 	if (!sink) {
6286 		DRM_ERROR("Failed to create sink!\n");
6287 		return NULL;
6288 	}
6289 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6290 
6291 	return sink;
6292 }
6293 
6294 static void set_multisync_trigger_params(
6295 		struct dc_stream_state *stream)
6296 {
6297 	struct dc_stream_state *master = NULL;
6298 
6299 	if (stream->triggered_crtc_reset.enabled) {
6300 		master = stream->triggered_crtc_reset.event_source;
6301 		stream->triggered_crtc_reset.event =
6302 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6303 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6304 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6305 	}
6306 }
6307 
6308 static void set_master_stream(struct dc_stream_state *stream_set[],
6309 			      int stream_count)
6310 {
6311 	int j, highest_rfr = 0, master_stream = 0;
6312 
6313 	for (j = 0;  j < stream_count; j++) {
6314 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6315 			int refresh_rate = 0;
6316 
6317 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6318 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
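			/*
			 * Example (hypothetical 1080p timing): pix_clk_100hz of
			 * 1485000 (148.5 MHz) with h_total 2200 and v_total 1125
			 * gives 148500000 / 2475000 = 60 Hz.
			 */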
6319 			if (refresh_rate > highest_rfr) {
6320 				highest_rfr = refresh_rate;
6321 				master_stream = j;
6322 			}
6323 		}
6324 	}
6325 	for (j = 0;  j < stream_count; j++) {
6326 		if (stream_set[j])
6327 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6328 	}
6329 }
6330 
6331 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6332 {
6333 	int i = 0;
6334 	struct dc_stream_state *stream;
6335 
6336 	if (context->stream_count < 2)
6337 		return;
6338 	for (i = 0; i < context->stream_count ; i++) {
6339 		if (!context->streams[i])
6340 			continue;
6341 		/*
6342 		 * TODO: add a function to read AMD VSDB bits and set
6343 		 * crtc_sync_master.multi_sync_enabled flag
6344 		 * For now it's set to false
6345 		 */
6346 	}
6347 
6348 	set_master_stream(context->streams, context->stream_count);
6349 
6350 	for (i = 0; i < context->stream_count ; i++) {
6351 		stream = context->streams[i];
6352 
6353 		if (!stream)
6354 			continue;
6355 
6356 		set_multisync_trigger_params(stream);
6357 	}
6358 }
6359 
6360 #if defined(CONFIG_DRM_AMD_DC_DCN)
6361 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6362 							struct dc_sink *sink, struct dc_stream_state *stream,
6363 							struct dsc_dec_dpcd_caps *dsc_caps)
6364 {
6365 	stream->timing.flags.DSC = 0;
6366 	dsc_caps->is_dsc_supported = false;
6367 
6368 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6369 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6370 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6371 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6372 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6373 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6374 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6375 				dsc_caps);
6376 	}
6377 }
6378 
6379 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6380 				    struct dc_sink *sink, struct dc_stream_state *stream,
6381 				    struct dsc_dec_dpcd_caps *dsc_caps,
6382 				    uint32_t max_dsc_target_bpp_limit_override)
6383 {
6384 	const struct dc_link_settings *verified_link_cap = NULL;
6385 	uint32_t link_bw_in_kbps;
6386 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6387 	struct dc *dc = sink->ctx->dc;
6388 	struct dc_dsc_bw_range bw_range = {0};
6389 	struct dc_dsc_config dsc_cfg = {0};
6390 
6391 	verified_link_cap = dc_link_get_link_cap(stream->link);
6392 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6393 	edp_min_bpp_x16 = 8 * 16;
6394 	edp_max_bpp_x16 = 8 * 16;
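	/* The _x16 values are in units of 1/16 bpp; e.g. 8 * 16 corresponds to 8.0 bpp. */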
6395 
6396 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6397 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6398 
6399 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6400 		edp_min_bpp_x16 = edp_max_bpp_x16;
6401 
6402 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6403 				dc->debug.dsc_min_slice_height_override,
6404 				edp_min_bpp_x16, edp_max_bpp_x16,
6405 				dsc_caps,
6406 				&stream->timing,
6407 				&bw_range)) {
6408 
6409 		if (bw_range.max_kbps < link_bw_in_kbps) {
6410 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6411 					dsc_caps,
6412 					dc->debug.dsc_min_slice_height_override,
6413 					max_dsc_target_bpp_limit_override,
6414 					0,
6415 					&stream->timing,
6416 					&dsc_cfg)) {
6417 				stream->timing.dsc_cfg = dsc_cfg;
6418 				stream->timing.flags.DSC = 1;
6419 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6420 			}
6421 			return;
6422 		}
6423 	}
6424 
6425 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6426 				dsc_caps,
6427 				dc->debug.dsc_min_slice_height_override,
6428 				max_dsc_target_bpp_limit_override,
6429 				link_bw_in_kbps,
6430 				&stream->timing,
6431 				&dsc_cfg)) {
6432 		stream->timing.dsc_cfg = dsc_cfg;
6433 		stream->timing.flags.DSC = 1;
6434 	}
6435 }
6436 
6437 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6438 										struct dc_sink *sink, struct dc_stream_state *stream,
6439 										struct dsc_dec_dpcd_caps *dsc_caps)
6440 {
6441 	struct drm_connector *drm_connector = &aconnector->base;
6442 	uint32_t link_bandwidth_kbps;
6443 	uint32_t max_dsc_target_bpp_limit_override = 0;
6444 	struct dc *dc = sink->ctx->dc;
6445 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6446 	uint32_t dsc_max_supported_bw_in_kbps;
6447 
6448 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6449 							dc_link_get_link_cap(aconnector->dc_link));
6450 
6451 	if (stream->link && stream->link->local_sink)
6452 		max_dsc_target_bpp_limit_override =
6453 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6454 
6455 	/* Set DSC policy according to dsc_clock_en */
6456 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6457 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6458 
6459 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6460 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6461 
6462 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6463 
6464 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6465 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6466 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6467 						dsc_caps,
6468 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6469 						max_dsc_target_bpp_limit_override,
6470 						link_bandwidth_kbps,
6471 						&stream->timing,
6472 						&stream->timing.dsc_cfg)) {
6473 				stream->timing.flags.DSC = 1;
6474 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6475 								 __func__, drm_connector->name);
6476 			}
6477 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6478 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6479 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6480 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6481 
6482 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6483 					max_supported_bw_in_kbps > 0 &&
6484 					dsc_max_supported_bw_in_kbps > 0)
6485 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6486 						dsc_caps,
6487 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6488 						max_dsc_target_bpp_limit_override,
6489 						dsc_max_supported_bw_in_kbps,
6490 						&stream->timing,
6491 						&stream->timing.dsc_cfg)) {
6492 					stream->timing.flags.DSC = 1;
6493 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6494 									 __func__, drm_connector->name);
6495 				}
6496 		}
6497 	}
6498 
6499 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6500 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6501 		stream->timing.flags.DSC = 1;
6502 
6503 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6504 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6505 
6506 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6507 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6508 
6509 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6510 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6511 }
6512 #endif /* CONFIG_DRM_AMD_DC_DCN */
6513 
6514 /**
6515  * DOC: FreeSync Video
6516  *
6517  * When a userspace application wants to play a video, the content follows a
6518  * standard format definition that usually specifies the FPS for that format.
6519  * The below list illustrates some video format and the expected FPS,
6520  * The list below illustrates some video formats and their expected FPS,
6521  *
6522  * - TV/NTSC (23.976 FPS)
6523  * - Cinema (24 FPS)
6524  * - TV/PAL (25 FPS)
6525  * - TV/NTSC (29.97 FPS)
6526  * - TV/NTSC (30 FPS)
6527  * - Cinema HFR (48 FPS)
6528  * - TV/PAL (50 FPS)
6529  * - Commonly used (60 FPS)
6530  * - Multiples of 24 (48,72,96,120 FPS)
6531  *
6532  * The list of standard video formats is not huge and can be added to the
6533  * connector's modeset list beforehand. With that, userspace can leverage
6534  * FreeSync to extend the front porch in order to attain the target refresh
6535  * rate. Such a switch will happen seamlessly, without screen blanking or
6536  * reprogramming of the output in any other way. If the userspace requests a
6537  * modesetting change compatible with FreeSync modes that only differ in the
6538  * refresh rate, DC will skip the full update and avoid blink during the
6539  * transition. For example, the video player can change the modesetting from
6540  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6541  * causing any display blink. This same concept can be applied to a mode
6542  * setting change.
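 *
 * As an illustration (hypothetical timings), a 60 Hz base mode with a vtotal
 * of 1125 lines can present 24 FPS content by stretching the front porch so
 * that the effective vtotal grows to roughly 1125 * 60 / 24 = 2812 lines,
 * leaving the pixel clock and the active timing untouched.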
6543  */
6544 static struct drm_display_mode *
6545 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6546 			  bool use_probed_modes)
6547 {
6548 	struct drm_display_mode *m, *m_pref = NULL;
6549 	u16 current_refresh, highest_refresh;
6550 	struct list_head *list_head = use_probed_modes ?
6551 						    &aconnector->base.probed_modes :
6552 						    &aconnector->base.modes;
6553 
6554 	if (aconnector->freesync_vid_base.clock != 0)
6555 		return &aconnector->freesync_vid_base;
6556 
6557 	/* Find the preferred mode */
6558 	list_for_each_entry (m, list_head, head) {
6559 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6560 			m_pref = m;
6561 			break;
6562 		}
6563 	}
6564 
6565 	if (!m_pref) {
6566 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
6567 		m_pref = list_first_entry_or_null(
6568 			&aconnector->base.modes, struct drm_display_mode, head);
6569 		if (!m_pref) {
6570 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6571 			return NULL;
6572 		}
6573 	}
6574 
6575 	highest_refresh = drm_mode_vrefresh(m_pref);
6576 
6577 	/*
6578 	 * Find the mode with highest refresh rate with same resolution.
6579 	 * For some monitors, preferred mode is not the mode with highest
6580 	 * supported refresh rate.
6581 	 */
6582 	list_for_each_entry (m, list_head, head) {
6583 		current_refresh  = drm_mode_vrefresh(m);
6584 
6585 		if (m->hdisplay == m_pref->hdisplay &&
6586 		    m->vdisplay == m_pref->vdisplay &&
6587 		    highest_refresh < current_refresh) {
6588 			highest_refresh = current_refresh;
6589 			m_pref = m;
6590 		}
6591 	}
6592 
6593 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6594 	return m_pref;
6595 }
6596 
6597 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6598 				   struct amdgpu_dm_connector *aconnector)
6599 {
6600 	struct drm_display_mode *high_mode;
6601 	int timing_diff;
6602 
6603 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6604 	if (!high_mode || !mode)
6605 		return false;
6606 
6607 	timing_diff = high_mode->vtotal - mode->vtotal;
6608 
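	/*
	 * A FreeSync video mode must match the base mode in everything except
	 * the vertical blanking: vtotal, vsync_start and vsync_end may all be
	 * shifted by the same amount.
	 */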
6609 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6610 	    high_mode->hdisplay != mode->hdisplay ||
6611 	    high_mode->vdisplay != mode->vdisplay ||
6612 	    high_mode->hsync_start != mode->hsync_start ||
6613 	    high_mode->hsync_end != mode->hsync_end ||
6614 	    high_mode->htotal != mode->htotal ||
6615 	    high_mode->hskew != mode->hskew ||
6616 	    high_mode->vscan != mode->vscan ||
6617 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6618 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6619 		return false;
6620 	else
6621 		return true;
6622 }
6623 
6624 static struct dc_stream_state *
6625 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6626 		       const struct drm_display_mode *drm_mode,
6627 		       const struct dm_connector_state *dm_state,
6628 		       const struct dc_stream_state *old_stream,
6629 		       int requested_bpc)
6630 {
6631 	struct drm_display_mode *preferred_mode = NULL;
6632 	struct drm_connector *drm_connector;
6633 	const struct drm_connector_state *con_state =
6634 		dm_state ? &dm_state->base : NULL;
6635 	struct dc_stream_state *stream = NULL;
6636 	struct drm_display_mode mode = *drm_mode;
6637 	struct drm_display_mode saved_mode;
6638 	struct drm_display_mode *freesync_mode = NULL;
6639 	bool native_mode_found = false;
6640 	bool recalculate_timing = false;
6641 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6642 	int mode_refresh;
6643 	int preferred_refresh = 0;
6644 #if defined(CONFIG_DRM_AMD_DC_DCN)
6645 	struct dsc_dec_dpcd_caps dsc_caps;
6646 #endif
6647 	struct dc_sink *sink = NULL;
6648 
6649 	memset(&saved_mode, 0, sizeof(saved_mode));
6650 
6651 	if (aconnector == NULL) {
6652 		DRM_ERROR("aconnector is NULL!\n");
6653 		return stream;
6654 	}
6655 
6656 	drm_connector = &aconnector->base;
6657 
6658 	if (!aconnector->dc_sink) {
6659 		sink = create_fake_sink(aconnector);
6660 		if (!sink)
6661 			return stream;
6662 	} else {
6663 		sink = aconnector->dc_sink;
6664 		dc_sink_retain(sink);
6665 	}
6666 
6667 	stream = dc_create_stream_for_sink(sink);
6668 
6669 	if (stream == NULL) {
6670 		DRM_ERROR("Failed to create stream for sink!\n");
6671 		goto finish;
6672 	}
6673 
6674 	stream->dm_stream_context = aconnector;
6675 
6676 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6677 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6678 
6679 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6680 		/* Search for preferred mode */
6681 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6682 			native_mode_found = true;
6683 			break;
6684 		}
6685 	}
6686 	if (!native_mode_found)
6687 		preferred_mode = list_first_entry_or_null(
6688 				&aconnector->base.modes,
6689 				struct drm_display_mode,
6690 				head);
6691 
6692 	mode_refresh = drm_mode_vrefresh(&mode);
6693 
6694 	if (preferred_mode == NULL) {
6695 		/*
6696 		 * This may not be an error: the use case is when there are no
6697 		 * usermode calls to reset and set the mode upon hotplug. In that
6698 		 * case, we set the mode ourselves to restore the previous mode,
6699 		 * and the mode list may not be filled in yet.
6700 		 */
6701 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6702 	} else {
6703 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6704 		if (recalculate_timing) {
6705 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6706 			drm_mode_copy(&saved_mode, &mode);
6707 			drm_mode_copy(&mode, freesync_mode);
6708 		} else {
6709 			decide_crtc_timing_for_drm_display_mode(
6710 				&mode, preferred_mode, scale);
6711 
6712 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6713 		}
6714 	}
6715 
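	/* Derive the crtc_* timing fields via drm_mode_set_crtcinfo(). */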
6716 	if (recalculate_timing)
6717 		drm_mode_set_crtcinfo(&saved_mode, 0);
6718 	else if (!dm_state)
6719 		drm_mode_set_crtcinfo(&mode, 0);
6720 
6721 	/*
6722 	 * If scaling is enabled and the refresh rate didn't change,
6723 	 * we copy the VIC and polarities of the old timings.
6724 	 */
6725 	if (!scale || mode_refresh != preferred_refresh)
6726 		fill_stream_properties_from_drm_display_mode(
6727 			stream, &mode, &aconnector->base, con_state, NULL,
6728 			requested_bpc);
6729 	else
6730 		fill_stream_properties_from_drm_display_mode(
6731 			stream, &mode, &aconnector->base, con_state, old_stream,
6732 			requested_bpc);
6733 
6734 #if defined(CONFIG_DRM_AMD_DC_DCN)
6735 	/* SST DSC determination policy */
6736 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6737 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6738 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6739 #endif
6740 
6741 	update_stream_scaling_settings(&mode, dm_state, stream);
6742 
6743 	fill_audio_info(
6744 		&stream->audio_info,
6745 		drm_connector,
6746 		sink);
6747 
6748 	update_stream_signal(stream, sink);
6749 
6750 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6751 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6752 
6753 	if (stream->link->psr_settings.psr_feature_enabled) {
6754 		//
6755 		// Decide whether the stream supports VSC SDP colorimetry
6756 		// capability before building the VSC info packet.
6757 		//
6758 		stream->use_vsc_sdp_for_colorimetry = false;
6759 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6760 			stream->use_vsc_sdp_for_colorimetry =
6761 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6762 		} else {
6763 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6764 				stream->use_vsc_sdp_for_colorimetry = true;
6765 		}
6766 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6767 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6768 
6769 	}
6770 finish:
6771 	dc_sink_release(sink);
6772 
6773 	return stream;
6774 }
6775 
6776 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6777 {
6778 	drm_crtc_cleanup(crtc);
6779 	kfree(crtc);
6780 }
6781 
6782 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6783 				  struct drm_crtc_state *state)
6784 {
6785 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6786 
6787 	/* TODO Destroy dc_stream objects are stream object is flattened */
6788 	/* TODO Destroy dc_stream objects once the stream object is flattened */
6789 		dc_stream_release(cur->stream);
6790 
6791 
6792 	__drm_atomic_helper_crtc_destroy_state(state);
6793 
6794 
6795 	kfree(state);
6796 }
6797 
6798 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6799 {
6800 	struct dm_crtc_state *state;
6801 
6802 	if (crtc->state)
6803 		dm_crtc_destroy_state(crtc, crtc->state);
6804 
6805 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6806 	if (WARN_ON(!state))
6807 		return;
6808 
6809 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6810 }
6811 
6812 static struct drm_crtc_state *
6813 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6814 {
6815 	struct dm_crtc_state *state, *cur;
6816 
6817 	cur = to_dm_crtc_state(crtc->state);
6818 
6819 	if (WARN_ON(!crtc->state))
6820 		return NULL;
6821 
6822 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6823 	if (!state)
6824 		return NULL;
6825 
6826 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6827 
6828 	if (cur->stream) {
6829 		state->stream = cur->stream;
6830 		dc_stream_retain(state->stream);
6831 	}
6832 
6833 	state->active_planes = cur->active_planes;
6834 	state->vrr_infopacket = cur->vrr_infopacket;
6835 	state->abm_level = cur->abm_level;
6836 	state->vrr_supported = cur->vrr_supported;
6837 	state->freesync_config = cur->freesync_config;
6838 	state->cm_has_degamma = cur->cm_has_degamma;
6839 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6840 	state->mpo_requested = cur->mpo_requested;
6841 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6842 	/* TODO Duplicate dc_stream after the stream object is flattened */
6843 	return &state->base;
6844 }
6845 
6846 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6847 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6848 {
6849 	crtc_debugfs_init(crtc);
6850 
6851 	return 0;
6852 }
6853 #endif
6854 
6855 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6856 {
6857 	enum dc_irq_source irq_source;
6858 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6859 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6860 	int rc;
6861 
6862 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6863 
6864 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6865 
6866 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6867 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6868 	return rc;
6869 }
6870 
6871 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6872 {
6873 	enum dc_irq_source irq_source;
6874 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6875 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6876 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6877 	struct amdgpu_display_manager *dm = &adev->dm;
6878 	struct vblank_control_work *work;
6879 	int rc = 0;
6880 
6881 	if (enable) {
6882 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6883 		if (amdgpu_dm_vrr_active(acrtc_state))
6884 			rc = dm_set_vupdate_irq(crtc, true);
6885 	} else {
6886 		/* vblank irq off -> vupdate irq off */
6887 		rc = dm_set_vupdate_irq(crtc, false);
6888 	}
6889 
6890 	if (rc)
6891 		return rc;
6892 
6893 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6894 
6895 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6896 		return -EBUSY;
6897 
6898 	if (amdgpu_in_reset(adev))
6899 		return 0;
6900 
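	/*
	 * Offload the rest of the vblank handling to a worker; this function
	 * may be called from atomic context, hence the GFP_ATOMIC allocation
	 * below.
	 */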
6901 	if (dm->vblank_control_workqueue) {
6902 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6903 		if (!work)
6904 			return -ENOMEM;
6905 
6906 		INIT_WORK(&work->work, vblank_control_worker);
6907 		work->dm = dm;
6908 		work->acrtc = acrtc;
6909 		work->enable = enable;
6910 
6911 		if (acrtc_state->stream) {
6912 			dc_stream_retain(acrtc_state->stream);
6913 			work->stream = acrtc_state->stream;
6914 		}
6915 
6916 		queue_work(dm->vblank_control_workqueue, &work->work);
6917 	}
6918 
6919 	return 0;
6920 }
6921 
6922 static int dm_enable_vblank(struct drm_crtc *crtc)
6923 {
6924 	return dm_set_vblank(crtc, true);
6925 }
6926 
6927 static void dm_disable_vblank(struct drm_crtc *crtc)
6928 {
6929 	dm_set_vblank(crtc, false);
6930 }
6931 
6932 /* Implemented only the options currently available for the driver */
6933 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6934 	.reset = dm_crtc_reset_state,
6935 	.destroy = amdgpu_dm_crtc_destroy,
6936 	.set_config = drm_atomic_helper_set_config,
6937 	.page_flip = drm_atomic_helper_page_flip,
6938 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6939 	.atomic_destroy_state = dm_crtc_destroy_state,
6940 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6941 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6942 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6943 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6944 	.enable_vblank = dm_enable_vblank,
6945 	.disable_vblank = dm_disable_vblank,
6946 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6947 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6948 	.late_register = amdgpu_dm_crtc_late_register,
6949 #endif
6950 };
6951 
6952 static enum drm_connector_status
6953 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6954 {
6955 	bool connected;
6956 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6957 
6958 	/*
6959 	 * Notes:
6960 	 * 1. This interface is NOT called in context of HPD irq.
6961 	 * 2. This interface *is called* in the context of a user-mode ioctl,
6962 	 * which makes it a bad place for *any* MST-related activity.
6963 	 */
6964 
6965 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6966 	    !aconnector->fake_enable)
6967 		connected = (aconnector->dc_sink != NULL);
6968 	else
6969 		connected = (aconnector->base.force == DRM_FORCE_ON);
6970 
6971 	update_subconnector_property(aconnector);
6972 
6973 	return (connected ? connector_status_connected :
6974 			connector_status_disconnected);
6975 }
6976 
6977 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6978 					    struct drm_connector_state *connector_state,
6979 					    struct drm_property *property,
6980 					    uint64_t val)
6981 {
6982 	struct drm_device *dev = connector->dev;
6983 	struct amdgpu_device *adev = drm_to_adev(dev);
6984 	struct dm_connector_state *dm_old_state =
6985 		to_dm_connector_state(connector->state);
6986 	struct dm_connector_state *dm_new_state =
6987 		to_dm_connector_state(connector_state);
6988 
6989 	int ret = -EINVAL;
6990 
6991 	if (property == dev->mode_config.scaling_mode_property) {
6992 		enum amdgpu_rmx_type rmx_type;
6993 
6994 		switch (val) {
6995 		case DRM_MODE_SCALE_CENTER:
6996 			rmx_type = RMX_CENTER;
6997 			break;
6998 		case DRM_MODE_SCALE_ASPECT:
6999 			rmx_type = RMX_ASPECT;
7000 			break;
7001 		case DRM_MODE_SCALE_FULLSCREEN:
7002 			rmx_type = RMX_FULL;
7003 			break;
7004 		case DRM_MODE_SCALE_NONE:
7005 		default:
7006 			rmx_type = RMX_OFF;
7007 			break;
7008 		}
7009 
7010 		if (dm_old_state->scaling == rmx_type)
7011 			return 0;
7012 
7013 		dm_new_state->scaling = rmx_type;
7014 		ret = 0;
7015 	} else if (property == adev->mode_info.underscan_hborder_property) {
7016 		dm_new_state->underscan_hborder = val;
7017 		ret = 0;
7018 	} else if (property == adev->mode_info.underscan_vborder_property) {
7019 		dm_new_state->underscan_vborder = val;
7020 		ret = 0;
7021 	} else if (property == adev->mode_info.underscan_property) {
7022 		dm_new_state->underscan_enable = val;
7023 		ret = 0;
7024 	} else if (property == adev->mode_info.abm_level_property) {
7025 		dm_new_state->abm_level = val;
7026 		ret = 0;
7027 	}
7028 
7029 	return ret;
7030 }
7031 
7032 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7033 					    const struct drm_connector_state *state,
7034 					    struct drm_property *property,
7035 					    uint64_t *val)
7036 {
7037 	struct drm_device *dev = connector->dev;
7038 	struct amdgpu_device *adev = drm_to_adev(dev);
7039 	struct dm_connector_state *dm_state =
7040 		to_dm_connector_state(state);
7041 	int ret = -EINVAL;
7042 
7043 	if (property == dev->mode_config.scaling_mode_property) {
7044 		switch (dm_state->scaling) {
7045 		case RMX_CENTER:
7046 			*val = DRM_MODE_SCALE_CENTER;
7047 			break;
7048 		case RMX_ASPECT:
7049 			*val = DRM_MODE_SCALE_ASPECT;
7050 			break;
7051 		case RMX_FULL:
7052 			*val = DRM_MODE_SCALE_FULLSCREEN;
7053 			break;
7054 		case RMX_OFF:
7055 		default:
7056 			*val = DRM_MODE_SCALE_NONE;
7057 			break;
7058 		}
7059 		ret = 0;
7060 	} else if (property == adev->mode_info.underscan_hborder_property) {
7061 		*val = dm_state->underscan_hborder;
7062 		ret = 0;
7063 	} else if (property == adev->mode_info.underscan_vborder_property) {
7064 		*val = dm_state->underscan_vborder;
7065 		ret = 0;
7066 	} else if (property == adev->mode_info.underscan_property) {
7067 		*val = dm_state->underscan_enable;
7068 		ret = 0;
7069 	} else if (property == adev->mode_info.abm_level_property) {
7070 		*val = dm_state->abm_level;
7071 		ret = 0;
7072 	}
7073 
7074 	return ret;
7075 }
7076 
7077 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7078 {
7079 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7080 
7081 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7082 }
7083 
7084 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7085 {
7086 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7087 	const struct dc_link *link = aconnector->dc_link;
7088 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7089 	struct amdgpu_display_manager *dm = &adev->dm;
7090 	int i;
7091 
7092 	/*
7093 	 * Call this only if mst_mgr was initialized before, since that is not
7094 	 * done for all connector types.
7095 	 */
7096 	if (aconnector->mst_mgr.dev)
7097 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7098 
7099 	for (i = 0; i < dm->num_of_edps; i++) {
7100 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7101 			backlight_device_unregister(dm->backlight_dev[i]);
7102 			dm->backlight_dev[i] = NULL;
7103 		}
7104 	}
7105 
7106 	if (aconnector->dc_em_sink)
7107 		dc_sink_release(aconnector->dc_em_sink);
7108 	aconnector->dc_em_sink = NULL;
7109 	if (aconnector->dc_sink)
7110 		dc_sink_release(aconnector->dc_sink);
7111 	aconnector->dc_sink = NULL;
7112 
7113 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7114 	drm_connector_unregister(connector);
7115 	drm_connector_cleanup(connector);
7116 	if (aconnector->i2c) {
7117 		i2c_del_adapter(&aconnector->i2c->base);
7118 		kfree(aconnector->i2c);
7119 	}
7120 	kfree(aconnector->dm_dp_aux.aux.name);
7121 
7122 	kfree(connector);
7123 }
7124 
7125 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7126 {
7127 	struct dm_connector_state *state =
7128 		to_dm_connector_state(connector->state);
7129 
7130 	if (connector->state)
7131 		__drm_atomic_helper_connector_destroy_state(connector->state);
7132 
7133 	kfree(state);
7134 
7135 	state = kzalloc(sizeof(*state), GFP_KERNEL);
7136 
7137 	if (state) {
7138 		state->scaling = RMX_OFF;
7139 		state->underscan_enable = false;
7140 		state->underscan_hborder = 0;
7141 		state->underscan_vborder = 0;
7142 		state->base.max_requested_bpc = 8;
7143 		state->vcpi_slots = 0;
7144 		state->pbn = 0;
7145 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7146 			state->abm_level = amdgpu_dm_abm_level;
7147 
7148 		__drm_atomic_helper_connector_reset(connector, &state->base);
7149 	}
7150 }
7151 
7152 struct drm_connector_state *
7153 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7154 {
7155 	struct dm_connector_state *state =
7156 		to_dm_connector_state(connector->state);
7157 
7158 	struct dm_connector_state *new_state =
7159 			kmemdup(state, sizeof(*state), GFP_KERNEL);
7160 
7161 	if (!new_state)
7162 		return NULL;
7163 
7164 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7165 
7166 	new_state->freesync_capable = state->freesync_capable;
7167 	new_state->abm_level = state->abm_level;
7168 	new_state->scaling = state->scaling;
7169 	new_state->underscan_enable = state->underscan_enable;
7170 	new_state->underscan_hborder = state->underscan_hborder;
7171 	new_state->underscan_vborder = state->underscan_vborder;
7172 	new_state->vcpi_slots = state->vcpi_slots;
7173 	new_state->pbn = state->pbn;
7174 	return &new_state->base;
7175 }
7176 
7177 static int
7178 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7179 {
7180 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7181 		to_amdgpu_dm_connector(connector);
7182 	int r;
7183 
7184 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7185 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7186 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7187 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7188 		if (r)
7189 			return r;
7190 	}
7191 
7192 #if defined(CONFIG_DEBUG_FS)
7193 	connector_debugfs_init(amdgpu_dm_connector);
7194 #endif
7195 
7196 	return 0;
7197 }
7198 
7199 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7200 	.reset = amdgpu_dm_connector_funcs_reset,
7201 	.detect = amdgpu_dm_connector_detect,
7202 	.fill_modes = drm_helper_probe_single_connector_modes,
7203 	.destroy = amdgpu_dm_connector_destroy,
7204 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7205 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7206 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7207 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7208 	.late_register = amdgpu_dm_connector_late_register,
7209 	.early_unregister = amdgpu_dm_connector_unregister
7210 };
7211 
7212 static int get_modes(struct drm_connector *connector)
7213 {
7214 	return amdgpu_dm_connector_get_modes(connector);
7215 }
7216 
7217 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7218 {
7219 	struct dc_sink_init_data init_params = {
7220 			.link = aconnector->dc_link,
7221 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7222 	};
7223 	struct edid *edid;
7224 
7225 	if (!aconnector->base.edid_blob_ptr) {
7226 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7227 				aconnector->base.name);
7228 
7229 		aconnector->base.force = DRM_FORCE_OFF;
7230 		aconnector->base.override_edid = false;
7231 		return;
7232 	}
7233 
7234 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7235 
7236 	aconnector->edid = edid;
7237 
7238 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7239 		aconnector->dc_link,
7240 		(uint8_t *)edid,
7241 		(edid->extensions + 1) * EDID_LENGTH,
7242 		&init_params);
7243 
7244 	if (aconnector->base.force == DRM_FORCE_ON) {
7245 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7246 		aconnector->dc_link->local_sink :
7247 		aconnector->dc_em_sink;
7248 		dc_sink_retain(aconnector->dc_sink);
7249 	}
7250 }
7251 
7252 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7253 {
7254 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7255 
7256 	/*
7257 	 * In case of a headless boot with force on for a DP managed connector,
7258 	 * those settings have to be != 0 to get an initial modeset.
7259 	 */
7260 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7261 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7262 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7263 	}
7264 
7265 
7266 	aconnector->base.override_edid = true;
7267 	create_eml_sink(aconnector);
7268 }
7269 
7270 struct dc_stream_state *
7271 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7272 				const struct drm_display_mode *drm_mode,
7273 				const struct dm_connector_state *dm_state,
7274 				const struct dc_stream_state *old_stream)
7275 {
7276 	struct drm_connector *connector = &aconnector->base;
7277 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7278 	struct dc_stream_state *stream;
7279 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7280 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7281 	enum dc_status dc_result = DC_OK;
7282 
7283 	do {
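	/*
	 * Build the stream and retry with progressively lower bpc (down to 6)
	 * until DC validation succeeds.
	 */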
7284 		stream = create_stream_for_sink(aconnector, drm_mode,
7285 						dm_state, old_stream,
7286 						requested_bpc);
7287 		if (stream == NULL) {
7288 			DRM_ERROR("Failed to create stream for sink!\n");
7289 			break;
7290 		}
7291 
7292 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7293 		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7294 			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7295 
7296 		if (dc_result != DC_OK) {
7297 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7298 				      drm_mode->hdisplay,
7299 				      drm_mode->vdisplay,
7300 				      drm_mode->clock,
7301 				      dc_result,
7302 				      dc_status_to_str(dc_result));
7303 
7304 			dc_stream_release(stream);
7305 			stream = NULL;
7306 			requested_bpc -= 2; /* lower bpc to retry validation */
7307 		}
7308 
7309 	} while (stream == NULL && requested_bpc >= 6);
7310 
7311 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7312 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7313 
7314 		aconnector->force_yuv420_output = true;
7315 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7316 						dm_state, old_stream);
7317 		aconnector->force_yuv420_output = false;
7318 	}
7319 
7320 	return stream;
7321 }
7322 
7323 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7324 				   struct drm_display_mode *mode)
7325 {
7326 	int result = MODE_ERROR;
7327 	struct dc_sink *dc_sink;
7328 	/* TODO: Unhardcode stream count */
7329 	struct dc_stream_state *stream;
7330 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7331 
7332 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7333 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7334 		return result;
7335 
7336 	/*
7337 	 * Only run this the first time mode_valid is called, to initialize
7338 	 * EDID management.
7339 	 */
7340 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7341 		!aconnector->dc_em_sink)
7342 		handle_edid_mgmt(aconnector);
7343 
7344 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7345 
7346 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7347 				aconnector->base.force != DRM_FORCE_ON) {
7348 		DRM_ERROR("dc_sink is NULL!\n");
7349 		goto fail;
7350 	}
7351 
7352 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7353 	if (stream) {
7354 		dc_stream_release(stream);
7355 		result = MODE_OK;
7356 	}
7357 
7358 fail:
7359 	/* TODO: error handling */
7360 	return result;
7361 }
7362 
7363 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7364 				struct dc_info_packet *out)
7365 {
7366 	struct hdmi_drm_infoframe frame;
7367 	unsigned char buf[30]; /* 26 + 4 */
7368 	ssize_t len;
7369 	int ret, i;
7370 
7371 	memset(out, 0, sizeof(*out));
7372 
7373 	if (!state->hdr_output_metadata)
7374 		return 0;
7375 
7376 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7377 	if (ret)
7378 		return ret;
7379 
7380 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7381 	if (len < 0)
7382 		return (int)len;
7383 
7384 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7385 	if (len != 30)
7386 		return -EINVAL;
7387 
7388 	/* Prepare the infopacket for DC. */
7389 	switch (state->connector->connector_type) {
7390 	case DRM_MODE_CONNECTOR_HDMIA:
7391 		out->hb0 = 0x87; /* type */
7392 		out->hb1 = 0x01; /* version */
7393 		out->hb2 = 0x1A; /* length */
7394 		out->sb[0] = buf[3]; /* checksum */
7395 		i = 1;
7396 		break;
7397 
7398 	case DRM_MODE_CONNECTOR_DisplayPort:
7399 	case DRM_MODE_CONNECTOR_eDP:
7400 		out->hb0 = 0x00; /* sdp id, zero */
7401 		out->hb1 = 0x87; /* type */
7402 		out->hb2 = 0x1D; /* payload len - 1 */
7403 		out->hb3 = (0x13 << 2); /* sdp version */
7404 		out->sb[0] = 0x01; /* version */
7405 		out->sb[1] = 0x1A; /* length */
7406 		i = 2;
7407 		break;
7408 
7409 	default:
7410 		return -EINVAL;
7411 	}
7412 
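	/* Copy the 26 bytes of static HDR metadata into the packet payload. */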
7413 	memcpy(&out->sb[i], &buf[4], 26);
7414 	out->valid = true;
7415 
7416 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7417 		       sizeof(out->sb), false);
7418 
7419 	return 0;
7420 }
7421 
7422 static int
7423 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7424 				 struct drm_atomic_state *state)
7425 {
7426 	struct drm_connector_state *new_con_state =
7427 		drm_atomic_get_new_connector_state(state, conn);
7428 	struct drm_connector_state *old_con_state =
7429 		drm_atomic_get_old_connector_state(state, conn);
7430 	struct drm_crtc *crtc = new_con_state->crtc;
7431 	struct drm_crtc_state *new_crtc_state;
7432 	int ret;
7433 
7434 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7435 
7436 	if (!crtc)
7437 		return 0;
7438 
7439 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7440 		struct dc_info_packet hdr_infopacket;
7441 
7442 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7443 		if (ret)
7444 			return ret;
7445 
7446 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7447 		if (IS_ERR(new_crtc_state))
7448 			return PTR_ERR(new_crtc_state);
7449 
7450 		/*
7451 		 * DC considers the stream backends changed if the
7452 		 * static metadata changes. Forcing the modeset also
7453 		 * gives a simple way for userspace to switch from
7454 		 * 8bpc to 10bpc when setting the metadata to enter
7455 		 * or exit HDR.
7456 		 *
7457 		 * Changing the static metadata after it's been
7458 		 * set is permissible, however. So only force a
7459 		 * modeset if we're entering or exiting HDR.
7460 		 */
7461 		new_crtc_state->mode_changed =
7462 			!old_con_state->hdr_output_metadata ||
7463 			!new_con_state->hdr_output_metadata;
7464 	}
7465 
7466 	return 0;
7467 }
7468 
7469 static const struct drm_connector_helper_funcs
7470 amdgpu_dm_connector_helper_funcs = {
7471 	/*
7472 	 * If hotplugging a second, bigger display in FB console mode, the bigger
7473 	 * resolution modes will be filtered by drm_mode_validate_size(), and those
7474 	 * modes are missing after the user starts lightdm. So we need to renew the
7475 	 * modes list in the get_modes callback, not just return the modes count.
7476 	 */
7477 	.get_modes = get_modes,
7478 	.mode_valid = amdgpu_dm_connector_mode_valid,
7479 	.atomic_check = amdgpu_dm_connector_atomic_check,
7480 };
7481 
7482 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7483 {
7484 }
7485 
7486 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7487 {
7488 	struct drm_atomic_state *state = new_crtc_state->state;
7489 	struct drm_plane *plane;
7490 	int num_active = 0;
7491 
7492 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7493 		struct drm_plane_state *new_plane_state;
7494 
7495 		/* Cursor planes are "fake". */
7496 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7497 			continue;
7498 
7499 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7500 
7501 		if (!new_plane_state) {
7502 			/*
7503 			 * The plane is enabled on the CRTC and hasn't changed
7504 			 * state. This means that it previously passed
7505 			 * validation and is therefore enabled.
7506 			 */
7507 			num_active += 1;
7508 			continue;
7509 		}
7510 
7511 		/* We need a framebuffer to be considered enabled. */
7512 		num_active += (new_plane_state->fb != NULL);
7513 	}
7514 
7515 	return num_active;
7516 }
7517 
7518 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7519 					 struct drm_crtc_state *new_crtc_state)
7520 {
7521 	struct dm_crtc_state *dm_new_crtc_state =
7522 		to_dm_crtc_state(new_crtc_state);
7523 
7524 	dm_new_crtc_state->active_planes = 0;
7525 
7526 	if (!dm_new_crtc_state->stream)
7527 		return;
7528 
7529 	dm_new_crtc_state->active_planes =
7530 		count_crtc_active_planes(new_crtc_state);
7531 }
7532 
7533 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7534 				       struct drm_atomic_state *state)
7535 {
7536 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7537 									  crtc);
7538 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7539 	struct dc *dc = adev->dm.dc;
7540 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7541 	int ret = -EINVAL;
7542 
7543 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7544 
7545 	dm_update_crtc_active_planes(crtc, crtc_state);
7546 
7547 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7548 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7549 		return ret;
7550 	}
7551 
7552 	/*
7553 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7554 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7555 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7556 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7557 	 */
7558 	if (crtc_state->enable &&
7559 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7560 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7561 		return -EINVAL;
7562 	}
7563 
7564 	/* In some use cases, like reset, no stream is attached */
7565 	if (!dm_crtc_state->stream)
7566 		return 0;
7567 
7568 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7569 		return 0;
7570 
7571 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7572 	return ret;
7573 }
7574 
7575 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7576 				      const struct drm_display_mode *mode,
7577 				      struct drm_display_mode *adjusted_mode)
7578 {
7579 	return true;
7580 }
7581 
7582 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7583 	.disable = dm_crtc_helper_disable,
7584 	.atomic_check = dm_crtc_helper_atomic_check,
7585 	.mode_fixup = dm_crtc_helper_mode_fixup,
7586 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7587 };
7588 
7589 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7590 {
7591 
7592 }
7593 
7594 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7595 {
7596 	switch (display_color_depth) {
7597 	case COLOR_DEPTH_666:
7598 		return 6;
7599 	case COLOR_DEPTH_888:
7600 		return 8;
7601 	case COLOR_DEPTH_101010:
7602 		return 10;
7603 	case COLOR_DEPTH_121212:
7604 		return 12;
7605 	case COLOR_DEPTH_141414:
7606 		return 14;
7607 	case COLOR_DEPTH_161616:
7608 		return 16;
7609 	default:
7610 		break;
7611 	}
7612 	return 0;
7613 }
7614 
7615 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7616 					  struct drm_crtc_state *crtc_state,
7617 					  struct drm_connector_state *conn_state)
7618 {
7619 	struct drm_atomic_state *state = crtc_state->state;
7620 	struct drm_connector *connector = conn_state->connector;
7621 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7622 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7623 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7624 	struct drm_dp_mst_topology_mgr *mst_mgr;
7625 	struct drm_dp_mst_port *mst_port;
7626 	enum dc_color_depth color_depth;
7627 	int clock, bpp = 0;
7628 	bool is_y420 = false;
7629 
7630 	if (!aconnector->port || !aconnector->dc_sink)
7631 		return 0;
7632 
7633 	mst_port = aconnector->port;
7634 	mst_mgr = &aconnector->mst_port->mst_mgr;
7635 
7636 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7637 		return 0;
7638 
7639 	if (!state->duplicated) {
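	/*
	 * Unless this atomic state was duplicated, recompute the PBN from the
	 * adjusted mode's pixel clock and effective bpp.
	 */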
7640 		int max_bpc = conn_state->max_requested_bpc;
7641 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7642 				aconnector->force_yuv420_output;
7643 		color_depth = convert_color_depth_from_display_info(connector,
7644 								    is_y420,
7645 								    max_bpc);
7646 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7647 		clock = adjusted_mode->clock;
7648 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7649 	}
7650 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7651 									   mst_mgr,
7652 									   mst_port,
7653 									   dm_new_connector_state->pbn,
7654 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7655 	if (dm_new_connector_state->vcpi_slots < 0) {
7656 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7657 		return dm_new_connector_state->vcpi_slots;
7658 	}
7659 	return 0;
7660 }
7661 
7662 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7663 	.disable = dm_encoder_helper_disable,
7664 	.atomic_check = dm_encoder_helper_atomic_check
7665 };
7666 
7667 #if defined(CONFIG_DRM_AMD_DC_DCN)
7668 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7669 					    struct dc_state *dc_state,
7670 					    struct dsc_mst_fairness_vars *vars)
7671 {
7672 	struct dc_stream_state *stream = NULL;
7673 	struct drm_connector *connector;
7674 	struct drm_connector_state *new_con_state;
7675 	struct amdgpu_dm_connector *aconnector;
7676 	struct dm_connector_state *dm_conn_state;
7677 	int i, j;
7678 	int vcpi, pbn_div, pbn, slot_num = 0;
7679 
7680 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7681 
7682 		aconnector = to_amdgpu_dm_connector(connector);
7683 
7684 		if (!aconnector->port)
7685 			continue;
7686 
7687 		if (!new_con_state || !new_con_state->crtc)
7688 			continue;
7689 
7690 		dm_conn_state = to_dm_connector_state(new_con_state);
7691 
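		/* Find the dc_stream that was created for this connector. */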
7692 		for (j = 0; j < dc_state->stream_count; j++) {
7693 			stream = dc_state->streams[j];
7694 			if (!stream)
7695 				continue;
7696 
7697 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7698 				break;
7699 
7700 			stream = NULL;
7701 		}
7702 
7703 		if (!stream)
7704 			continue;
7705 
7706 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7707 		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7708 		for (j = 0; j < dc_state->stream_count; j++) {
7709 			if (vars[j].aconnector == aconnector) {
7710 				pbn = vars[j].pbn;
7711 				break;
7712 			}
7713 		}
7714 
7715 		if (j == dc_state->stream_count)
7716 			continue;
7717 
7718 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7719 
7720 		if (stream->timing.flags.DSC != 1) {
7721 			dm_conn_state->pbn = pbn;
7722 			dm_conn_state->vcpi_slots = slot_num;
7723 
7724 			drm_dp_mst_atomic_enable_dsc(state,
7725 						     aconnector->port,
7726 						     dm_conn_state->pbn,
7727 						     0,
7728 						     false);
7729 			continue;
7730 		}
7731 
7732 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7733 						    aconnector->port,
7734 						    pbn, pbn_div,
7735 						    true);
7736 		if (vcpi < 0)
7737 			return vcpi;
7738 
7739 		dm_conn_state->pbn = pbn;
7740 		dm_conn_state->vcpi_slots = vcpi;
7741 	}
7742 	return 0;
7743 }
7744 #endif
7745 
7746 static void dm_drm_plane_reset(struct drm_plane *plane)
7747 {
7748 	struct dm_plane_state *amdgpu_state = NULL;
7749 
7750 	if (plane->state)
7751 		plane->funcs->atomic_destroy_state(plane, plane->state);
7752 
7753 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7754 	WARN_ON(amdgpu_state == NULL);
7755 
7756 	if (amdgpu_state)
7757 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7758 }
7759 
7760 static struct drm_plane_state *
7761 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7762 {
7763 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7764 
7765 	old_dm_plane_state = to_dm_plane_state(plane->state);
7766 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7767 	if (!dm_plane_state)
7768 		return NULL;
7769 
7770 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7771 
7772 	if (old_dm_plane_state->dc_state) {
7773 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7774 		dc_plane_state_retain(dm_plane_state->dc_state);
7775 	}
7776 
7777 	return &dm_plane_state->base;
7778 }
7779 
7780 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7781 				struct drm_plane_state *state)
7782 {
7783 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7784 
7785 	if (dm_plane_state->dc_state)
7786 		dc_plane_state_release(dm_plane_state->dc_state);
7787 
7788 	drm_atomic_helper_plane_destroy_state(plane, state);
7789 }
7790 
7791 static const struct drm_plane_funcs dm_plane_funcs = {
7792 	.update_plane	= drm_atomic_helper_update_plane,
7793 	.disable_plane	= drm_atomic_helper_disable_plane,
7794 	.destroy	= drm_primary_helper_destroy,
7795 	.reset = dm_drm_plane_reset,
7796 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7797 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7798 	.format_mod_supported = dm_plane_format_mod_supported,
7799 };
7800 
7801 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7802 				      struct drm_plane_state *new_state)
7803 {
7804 	struct amdgpu_framebuffer *afb;
7805 	struct drm_gem_object *obj;
7806 	struct amdgpu_device *adev;
7807 	struct amdgpu_bo *rbo;
7808 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7809 	uint32_t domain;
7810 	int r;
7811 
7812 	if (!new_state->fb) {
7813 		DRM_DEBUG_KMS("No FB bound\n");
7814 		return 0;
7815 	}
7816 
7817 	afb = to_amdgpu_framebuffer(new_state->fb);
7818 	obj = new_state->fb->obj[0];
7819 	rbo = gem_to_amdgpu_bo(obj);
7820 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7821 
7822 	r = amdgpu_bo_reserve(rbo, true);
7823 	if (r) {
7824 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7825 		return r;
7826 	}
7827 
7828 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7829 	if (r) {
7830 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7831 		goto error_unlock;
7832 	}
7833 
7834 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7835 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7836 	else
7837 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7838 
7839 	r = amdgpu_bo_pin(rbo, domain);
7840 	if (unlikely(r != 0)) {
7841 		if (r != -ERESTARTSYS)
7842 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7843 		goto error_unlock;
7844 	}
7845 
7846 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7847 	if (unlikely(r != 0)) {
7848 		DRM_ERROR("%p bind failed\n", rbo);
7849 		goto error_unpin;
7850 	}
7851 
7852 	amdgpu_bo_unreserve(rbo);
7853 
7854 	afb->address = amdgpu_bo_gpu_offset(rbo);
7855 
7856 	amdgpu_bo_ref(rbo);
7857 
7858 	/*
7859 	 * We don't do surface updates on planes that have been newly created,
7860 	 * but we also don't have the afb->address during atomic check.
7861 	 *
7862 	 * Fill in buffer attributes depending on the address here, but only on
7863 	 * newly created planes since they're not being used by DC yet and this
7864 	 * won't modify global state.
7865 	 */
7866 	dm_plane_state_old = to_dm_plane_state(plane->state);
7867 	dm_plane_state_new = to_dm_plane_state(new_state);
7868 
7869 	if (dm_plane_state_new->dc_state &&
7870 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7871 		struct dc_plane_state *plane_state =
7872 			dm_plane_state_new->dc_state;
7873 		bool force_disable_dcc = !plane_state->dcc.enable;
7874 
7875 		fill_plane_buffer_attributes(
7876 			adev, afb, plane_state->format, plane_state->rotation,
7877 			afb->tiling_flags,
7878 			&plane_state->tiling_info, &plane_state->plane_size,
7879 			&plane_state->dcc, &plane_state->address,
7880 			afb->tmz_surface, force_disable_dcc);
7881 	}
7882 
7883 	return 0;
7884 
7885 error_unpin:
7886 	amdgpu_bo_unpin(rbo);
7887 
7888 error_unlock:
7889 	amdgpu_bo_unreserve(rbo);
7890 	return r;
7891 }
7892 
7893 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7894 				       struct drm_plane_state *old_state)
7895 {
7896 	struct amdgpu_bo *rbo;
7897 	int r;
7898 
7899 	if (!old_state->fb)
7900 		return;
7901 
7902 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7903 	r = amdgpu_bo_reserve(rbo, false);
7904 	if (unlikely(r)) {
7905 		DRM_ERROR("failed to reserve rbo before unpin\n");
7906 		return;
7907 	}
7908 
7909 	amdgpu_bo_unpin(rbo);
7910 	amdgpu_bo_unreserve(rbo);
7911 	amdgpu_bo_unref(&rbo);
7912 }
7913 
7914 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7915 				       struct drm_crtc_state *new_crtc_state)
7916 {
7917 	struct drm_framebuffer *fb = state->fb;
7918 	int min_downscale, max_upscale;
7919 	int min_scale = 0;
7920 	int max_scale = INT_MAX;
7921 
7922 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7923 	if (fb && state->crtc) {
7924 		/* Validate viewport to cover the case when only the position changes */
7925 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7926 			int viewport_width = state->crtc_w;
7927 			int viewport_height = state->crtc_h;
7928 
7929 			if (state->crtc_x < 0)
7930 				viewport_width += state->crtc_x;
7931 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7932 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7933 
7934 			if (state->crtc_y < 0)
7935 				viewport_height += state->crtc_y;
7936 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7937 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7938 
7939 			if (viewport_width < 0 || viewport_height < 0) {
7940 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7941 				return -EINVAL;
7942 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7943 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7944 				return -EINVAL;
7945 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7946 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7947 				return -EINVAL;
7948 			}
7949 
7950 		}
7951 
7952 		/* Get min/max allowed scaling factors from plane caps. */
7953 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7954 					     &min_downscale, &max_upscale);
7955 		/*
7956 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7957 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7958 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7959 		 */
7960 		min_scale = (1000 << 16) / max_upscale;
7961 		max_scale = (1000 << 16) / min_downscale;
7962 	}
7963 
7964 	return drm_atomic_helper_check_plane_state(
7965 		state, new_crtc_state, min_scale, max_scale, true, true);
7966 }
7967 
7968 static int dm_plane_atomic_check(struct drm_plane *plane,
7969 				 struct drm_atomic_state *state)
7970 {
7971 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7972 										 plane);
7973 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7974 	struct dc *dc = adev->dm.dc;
7975 	struct dm_plane_state *dm_plane_state;
7976 	struct dc_scaling_info scaling_info;
7977 	struct drm_crtc_state *new_crtc_state;
7978 	int ret;
7979 
7980 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7981 
7982 	dm_plane_state = to_dm_plane_state(new_plane_state);
7983 
7984 	if (!dm_plane_state->dc_state)
7985 		return 0;
7986 
7987 	new_crtc_state =
7988 		drm_atomic_get_new_crtc_state(state,
7989 					      new_plane_state->crtc);
7990 	if (!new_crtc_state)
7991 		return -EINVAL;
7992 
7993 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7994 	if (ret)
7995 		return ret;
7996 
7997 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7998 	if (ret)
7999 		return ret;
8000 
8001 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
8002 		return 0;
8003 
8004 	return -EINVAL;
8005 }
8006 
8007 static int dm_plane_atomic_async_check(struct drm_plane *plane,
8008 				       struct drm_atomic_state *state)
8009 {
8010 	/* Only support async updates on cursor planes. */
8011 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
8012 		return -EINVAL;
8013 
8014 	return 0;
8015 }
8016 
8017 static void dm_plane_atomic_async_update(struct drm_plane *plane,
8018 					 struct drm_atomic_state *state)
8019 {
8020 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
8021 									   plane);
8022 	struct drm_plane_state *old_state =
8023 		drm_atomic_get_old_plane_state(state, plane);
8024 
8025 	trace_amdgpu_dm_atomic_update_cursor(new_state);
8026 
8027 	swap(plane->state->fb, new_state->fb);
8028 
8029 	plane->state->src_x = new_state->src_x;
8030 	plane->state->src_y = new_state->src_y;
8031 	plane->state->src_w = new_state->src_w;
8032 	plane->state->src_h = new_state->src_h;
8033 	plane->state->crtc_x = new_state->crtc_x;
8034 	plane->state->crtc_y = new_state->crtc_y;
8035 	plane->state->crtc_w = new_state->crtc_w;
8036 	plane->state->crtc_h = new_state->crtc_h;
8037 
8038 	handle_cursor_update(plane, old_state);
8039 }
8040 
8041 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
8042 	.prepare_fb = dm_plane_helper_prepare_fb,
8043 	.cleanup_fb = dm_plane_helper_cleanup_fb,
8044 	.atomic_check = dm_plane_atomic_check,
8045 	.atomic_async_check = dm_plane_atomic_async_check,
8046 	.atomic_async_update = dm_plane_atomic_async_update
8047 };
8048 
8049 /*
8050  * TODO: these are currently initialized to rgb formats only.
8051  * For future use cases we should either initialize them dynamically based on
8052  * plane capabilities, or initialize this array to all formats, so the internal
8053  * drm check will succeed, and let DC implement the proper check.
8054  */
8055 static const uint32_t rgb_formats[] = {
8056 	DRM_FORMAT_XRGB8888,
8057 	DRM_FORMAT_ARGB8888,
8058 	DRM_FORMAT_RGBA8888,
8059 	DRM_FORMAT_XRGB2101010,
8060 	DRM_FORMAT_XBGR2101010,
8061 	DRM_FORMAT_ARGB2101010,
8062 	DRM_FORMAT_ABGR2101010,
8063 	DRM_FORMAT_XRGB16161616,
8064 	DRM_FORMAT_XBGR16161616,
8065 	DRM_FORMAT_ARGB16161616,
8066 	DRM_FORMAT_ABGR16161616,
8067 	DRM_FORMAT_XBGR8888,
8068 	DRM_FORMAT_ABGR8888,
8069 	DRM_FORMAT_RGB565,
8070 };
8071 
8072 static const uint32_t overlay_formats[] = {
8073 	DRM_FORMAT_XRGB8888,
8074 	DRM_FORMAT_ARGB8888,
8075 	DRM_FORMAT_RGBA8888,
8076 	DRM_FORMAT_XBGR8888,
8077 	DRM_FORMAT_ABGR8888,
8078 	DRM_FORMAT_RGB565
8079 };
8080 
8081 static const u32 cursor_formats[] = {
8082 	DRM_FORMAT_ARGB8888
8083 };
8084 
8085 static int get_plane_formats(const struct drm_plane *plane,
8086 			     const struct dc_plane_cap *plane_cap,
8087 			     uint32_t *formats, int max_formats)
8088 {
8089 	int i, num_formats = 0;
8090 
8091 	/*
8092 	 * TODO: Query support for each group of formats directly from
8093 	 * DC plane caps. This will require adding more formats to the
8094 	 * caps list.
8095 	 */
8096 
8097 	switch (plane->type) {
8098 	case DRM_PLANE_TYPE_PRIMARY:
8099 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8100 			if (num_formats >= max_formats)
8101 				break;
8102 
8103 			formats[num_formats++] = rgb_formats[i];
8104 		}
8105 
8106 		if (plane_cap && plane_cap->pixel_format_support.nv12)
8107 			formats[num_formats++] = DRM_FORMAT_NV12;
8108 		if (plane_cap && plane_cap->pixel_format_support.p010)
8109 			formats[num_formats++] = DRM_FORMAT_P010;
8110 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
8111 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8112 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8113 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8114 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8115 		}
8116 		break;
8117 
8118 	case DRM_PLANE_TYPE_OVERLAY:
8119 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8120 			if (num_formats >= max_formats)
8121 				break;
8122 
8123 			formats[num_formats++] = overlay_formats[i];
8124 		}
8125 		break;
8126 
8127 	case DRM_PLANE_TYPE_CURSOR:
8128 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8129 			if (num_formats >= max_formats)
8130 				break;
8131 
8132 			formats[num_formats++] = cursor_formats[i];
8133 		}
8134 		break;
8135 	}
8136 
8137 	return num_formats;
8138 }
8139 
8140 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8141 				struct drm_plane *plane,
8142 				unsigned long possible_crtcs,
8143 				const struct dc_plane_cap *plane_cap)
8144 {
8145 	uint32_t formats[32];
8146 	int num_formats;
8147 	int res = -EPERM;
8148 	unsigned int supported_rotations;
8149 	uint64_t *modifiers = NULL;
8150 
8151 	num_formats = get_plane_formats(plane, plane_cap, formats,
8152 					ARRAY_SIZE(formats));
8153 
8154 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8155 	if (res)
8156 		return res;
8157 
8158 	if (modifiers == NULL)
8159 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8160 
8161 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8162 				       &dm_plane_funcs, formats, num_formats,
8163 				       modifiers, plane->type, NULL);
8164 	kfree(modifiers);
8165 	if (res)
8166 		return res;
8167 
8168 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8169 	    plane_cap && plane_cap->per_pixel_alpha) {
8170 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8171 					  BIT(DRM_MODE_BLEND_PREMULTI) |
8172 					  BIT(DRM_MODE_BLEND_COVERAGE);
8173 
8174 		drm_plane_create_alpha_property(plane);
8175 		drm_plane_create_blend_mode_property(plane, blend_caps);
8176 	}
8177 
8178 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8179 	    plane_cap &&
8180 	    (plane_cap->pixel_format_support.nv12 ||
8181 	     plane_cap->pixel_format_support.p010)) {
8182 		/* This only affects YUV formats. */
8183 		drm_plane_create_color_properties(
8184 			plane,
8185 			BIT(DRM_COLOR_YCBCR_BT601) |
8186 			BIT(DRM_COLOR_YCBCR_BT709) |
8187 			BIT(DRM_COLOR_YCBCR_BT2020),
8188 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8189 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8190 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8191 	}
8192 
8193 	supported_rotations =
8194 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8195 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8196 
8197 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
8198 	    plane->type != DRM_PLANE_TYPE_CURSOR)
8199 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8200 						   supported_rotations);
8201 
8202 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8203 
8204 	/* Create (reset) the plane state */
8205 	if (plane->funcs->reset)
8206 		plane->funcs->reset(plane);
8207 
8208 	return 0;
8209 }
8210 
8211 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8212 			       struct drm_plane *plane,
8213 			       uint32_t crtc_index)
8214 {
8215 	struct amdgpu_crtc *acrtc = NULL;
8216 	struct drm_plane *cursor_plane;
8217 
8218 	int res = -ENOMEM;
8219 
8220 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8221 	if (!cursor_plane)
8222 		goto fail;
8223 
8224 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8225 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
8226 
8227 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8228 	if (!acrtc)
8229 		goto fail;
8230 
8231 	res = drm_crtc_init_with_planes(
8232 			dm->ddev,
8233 			&acrtc->base,
8234 			plane,
8235 			cursor_plane,
8236 			&amdgpu_dm_crtc_funcs, NULL);
8237 
8238 	if (res)
8239 		goto fail;
8240 
8241 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8242 
8243 	/* Create (reset) the plane state */
8244 	/* Create (reset) the crtc state */
8245 		acrtc->base.funcs->reset(&acrtc->base);
8246 
8247 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8248 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8249 
8250 	acrtc->crtc_id = crtc_index;
8251 	acrtc->base.enabled = false;
8252 	acrtc->otg_inst = -1;
8253 
8254 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8255 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8256 				   true, MAX_COLOR_LUT_ENTRIES);
8257 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8258 
8259 	return 0;
8260 
8261 fail:
8262 	kfree(acrtc);
8263 	kfree(cursor_plane);
8264 	return res;
8265 }
8266 
8267 
8268 static int to_drm_connector_type(enum signal_type st)
8269 {
8270 	switch (st) {
8271 	case SIGNAL_TYPE_HDMI_TYPE_A:
8272 		return DRM_MODE_CONNECTOR_HDMIA;
8273 	case SIGNAL_TYPE_EDP:
8274 		return DRM_MODE_CONNECTOR_eDP;
8275 	case SIGNAL_TYPE_LVDS:
8276 		return DRM_MODE_CONNECTOR_LVDS;
8277 	case SIGNAL_TYPE_RGB:
8278 		return DRM_MODE_CONNECTOR_VGA;
8279 	case SIGNAL_TYPE_DISPLAY_PORT:
8280 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8281 		return DRM_MODE_CONNECTOR_DisplayPort;
8282 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8283 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8284 		return DRM_MODE_CONNECTOR_DVID;
8285 	case SIGNAL_TYPE_VIRTUAL:
8286 		return DRM_MODE_CONNECTOR_VIRTUAL;
8287 
8288 	default:
8289 		return DRM_MODE_CONNECTOR_Unknown;
8290 	}
8291 }
8292 
8293 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8294 {
8295 	struct drm_encoder *encoder;
8296 
8297 	/* There is only one encoder per connector */
8298 	drm_connector_for_each_possible_encoder(connector, encoder)
8299 		return encoder;
8300 
8301 	return NULL;
8302 }
8303 
8304 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8305 {
8306 	struct drm_encoder *encoder;
8307 	struct amdgpu_encoder *amdgpu_encoder;
8308 
8309 	encoder = amdgpu_dm_connector_to_encoder(connector);
8310 
8311 	if (encoder == NULL)
8312 		return;
8313 
8314 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8315 
8316 	amdgpu_encoder->native_mode.clock = 0;
8317 
8318 	if (!list_empty(&connector->probed_modes)) {
8319 		struct drm_display_mode *preferred_mode = NULL;
8320 
8321 		list_for_each_entry(preferred_mode,
8322 				    &connector->probed_modes,
8323 				    head) {
8324 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8325 				amdgpu_encoder->native_mode = *preferred_mode;
8326 
8327 			break;
8328 		}
8329 
8330 	}
8331 }
8332 
8333 static struct drm_display_mode *
8334 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8335 			     char *name,
8336 			     int hdisplay, int vdisplay)
8337 {
8338 	struct drm_device *dev = encoder->dev;
8339 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8340 	struct drm_display_mode *mode = NULL;
8341 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8342 
8343 	mode = drm_mode_duplicate(dev, native_mode);
8344 
8345 	if (mode == NULL)
8346 		return NULL;
8347 
8348 	mode->hdisplay = hdisplay;
8349 	mode->vdisplay = vdisplay;
8350 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8351 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8352 
8353 	return mode;
8354 
8355 }
8356 
8357 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8358 						 struct drm_connector *connector)
8359 {
8360 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8361 	struct drm_display_mode *mode = NULL;
8362 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8363 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8364 				to_amdgpu_dm_connector(connector);
8365 	int i;
8366 	int n;
8367 	struct mode_size {
8368 		char name[DRM_DISPLAY_MODE_LEN];
8369 		int w;
8370 		int h;
8371 	} common_modes[] = {
8372 		{  "640x480",  640,  480},
8373 		{  "800x600",  800,  600},
8374 		{ "1024x768", 1024,  768},
8375 		{ "1280x720", 1280,  720},
8376 		{ "1280x800", 1280,  800},
8377 		{"1280x1024", 1280, 1024},
8378 		{ "1440x900", 1440,  900},
8379 		{"1680x1050", 1680, 1050},
8380 		{"1600x1200", 1600, 1200},
8381 		{"1920x1080", 1920, 1080},
8382 		{"1920x1200", 1920, 1200}
8383 	};
8384 
8385 	n = ARRAY_SIZE(common_modes);
8386 
8387 	for (i = 0; i < n; i++) {
8388 		struct drm_display_mode *curmode = NULL;
8389 		bool mode_existed = false;
8390 
8391 		if (common_modes[i].w > native_mode->hdisplay ||
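		/*
		 * Skip candidate sizes larger than the native mode, and the
		 * native size itself, which the EDID-provided modes already
		 * cover.
		 */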
8392 		    common_modes[i].h > native_mode->vdisplay ||
8393 		   (common_modes[i].w == native_mode->hdisplay &&
8394 		    common_modes[i].h == native_mode->vdisplay))
8395 			continue;
8396 
8397 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8398 			if (common_modes[i].w == curmode->hdisplay &&
8399 			    common_modes[i].h == curmode->vdisplay) {
8400 				mode_existed = true;
8401 				break;
8402 			}
8403 		}
8404 
8405 		if (mode_existed)
8406 			continue;
8407 
8408 		mode = amdgpu_dm_create_common_mode(encoder,
8409 				common_modes[i].name, common_modes[i].w,
8410 				common_modes[i].h);
8411 		if (!mode)
8412 			continue;
8413 
8414 		drm_mode_probed_add(connector, mode);
8415 		amdgpu_dm_connector->num_modes++;
8416 	}
8417 }
8418 
8419 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8420 {
8421 	struct drm_encoder *encoder;
8422 	struct amdgpu_encoder *amdgpu_encoder;
8423 	const struct drm_display_mode *native_mode;
8424 
8425 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8426 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8427 		return;
8428 
8429 	encoder = amdgpu_dm_connector_to_encoder(connector);
8430 	if (!encoder)
8431 		return;
8432 
8433 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8434 
8435 	native_mode = &amdgpu_encoder->native_mode;
8436 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8437 		return;
8438 
8439 	drm_connector_set_panel_orientation_with_quirk(connector,
8440 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8441 						       native_mode->hdisplay,
8442 						       native_mode->vdisplay);
8443 }
8444 
8445 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8446 					      struct edid *edid)
8447 {
8448 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8449 			to_amdgpu_dm_connector(connector);
8450 
8451 	if (edid) {
8452 		/* empty probed_modes */
8453 		INIT_LIST_HEAD(&connector->probed_modes);
8454 		amdgpu_dm_connector->num_modes =
8455 				drm_add_edid_modes(connector, edid);
8456 
8457 		/* Sort the probed modes before calling
8458 		 * amdgpu_dm_get_native_mode(), since an EDID can have
8459 		 * more than one preferred mode. Modes later in the
8460 		 * probed mode list may have a higher, preferred
8461 		 * resolution: for example, 3840x2160 in the base EDID
8462 		 * preferred timing and 4096x2160 as the preferred
8463 		 * resolution in a later DID extension block.
8464 		 */
8465 		drm_mode_sort(&connector->probed_modes);
8466 		amdgpu_dm_get_native_mode(connector);
8467 
8468 		/* Freesync capabilities are reset by calling
8469 		 * drm_add_edid_modes() and need to be
8470 		 * restored here.
8471 		 */
8472 		amdgpu_dm_update_freesync_caps(connector, edid);
8473 
8474 		amdgpu_set_panel_orientation(connector);
8475 	} else {
8476 		amdgpu_dm_connector->num_modes = 0;
8477 	}
8478 }
8479 
8480 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8481 			      struct drm_display_mode *mode)
8482 {
8483 	struct drm_display_mode *m;
8484 
8485 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8486 		if (drm_mode_equal(m, mode))
8487 			return true;
8488 	}
8489 
8490 	return false;
8491 }
8492 
8493 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8494 {
8495 	const struct drm_display_mode *m;
8496 	struct drm_display_mode *new_mode;
8497 	uint i;
8498 	uint32_t new_modes_count = 0;
8499 
8500 	/* Standard FPS values
8501 	 *
8502 	 * 23.976       - TV/NTSC
8503 	 * 24 	        - Cinema
8504 	 * 25 	        - TV/PAL
8505 	 * 29.97        - TV/NTSC
8506 	 * 30 	        - TV/NTSC
8507 	 * 48 	        - Cinema HFR
8508 	 * 50 	        - TV/PAL
8509 	 * 60 	        - Commonly used
8510 	 * 48,72,96,120 - Multiples of 24
8511 	 */
8512 	static const uint32_t common_rates[] = {
8513 		23976, 24000, 25000, 29970, 30000,
8514 		48000, 50000, 60000, 72000, 96000, 120000
8515 	};
8516 
8517 	/*
8518 	 * Find the mode with the highest refresh rate at the same resolution
8519 	 * as the preferred mode. Some monitors report a preferred mode with a
8520 	 * lower refresh rate than the highest one they support.
8521 	 */
8522 
8523 	m = get_highest_refresh_rate_mode(aconnector, true);
8524 	if (!m)
8525 		return 0;
8526 
8527 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8528 		uint64_t target_vtotal, target_vtotal_diff;
8529 		uint64_t num, den;
8530 
8531 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8532 			continue;
8533 
8534 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8535 		    common_rates[i] > aconnector->max_vfreq * 1000)
8536 			continue;
8537 
8538 		num = (unsigned long long)m->clock * 1000 * 1000;
8539 		den = common_rates[i] * (unsigned long long)m->htotal;
8540 		target_vtotal = div_u64(num, den);
8541 		target_vtotal_diff = target_vtotal - m->vtotal;
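		/*
		 * Illustrative example (hypothetical timing, not from a real
		 * panel): for a 1920x1080 mode with clock = 148500 kHz,
		 * htotal = 2200 and vtotal = 1125 (~60 Hz), a 48 Hz target
		 * gives target_vtotal = 148500000000 / (48000 * 2200) = 1406,
		 * i.e. target_vtotal_diff = 281 extra lines of blanking.
		 */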
8542 
8543 		/* Check for illegal modes */
8544 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8545 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8546 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8547 			continue;
8548 
8549 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8550 		if (!new_mode)
8551 			goto out;
8552 
8553 		new_mode->vtotal += (u16)target_vtotal_diff;
8554 		new_mode->vsync_start += (u16)target_vtotal_diff;
8555 		new_mode->vsync_end += (u16)target_vtotal_diff;
8556 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8557 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8558 
8559 		if (!is_duplicate_mode(aconnector, new_mode)) {
8560 			drm_mode_probed_add(&aconnector->base, new_mode);
8561 			new_modes_count += 1;
8562 		} else
8563 			drm_mode_destroy(aconnector->base.dev, new_mode);
8564 	}
8565  out:
8566 	return new_modes_count;
8567 }
8568 
8569 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8570 						   struct edid *edid)
8571 {
8572 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8573 		to_amdgpu_dm_connector(connector);
8574 
8575 	if (!edid)
8576 		return;
8577 
8578 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8579 		amdgpu_dm_connector->num_modes +=
8580 			add_fs_modes(amdgpu_dm_connector);
8581 }
8582 
8583 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8584 {
8585 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8586 			to_amdgpu_dm_connector(connector);
8587 	struct drm_encoder *encoder;
8588 	struct edid *edid = amdgpu_dm_connector->edid;
8589 
8590 	encoder = amdgpu_dm_connector_to_encoder(connector);
8591 
8592 	if (!drm_edid_is_valid(edid)) {
8593 		amdgpu_dm_connector->num_modes =
8594 				drm_add_modes_noedid(connector, 640, 480);
8595 	} else {
8596 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8597 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8598 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8599 	}
8600 	amdgpu_dm_fbc_init(connector);
8601 
8602 	return amdgpu_dm_connector->num_modes;
8603 }
8604 
8605 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8606 				     struct amdgpu_dm_connector *aconnector,
8607 				     int connector_type,
8608 				     struct dc_link *link,
8609 				     int link_index)
8610 {
8611 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8612 
8613 	/*
8614 	 * Some of the properties below require access to state, like bpc.
8615 	 * Allocate some default initial connector state with our reset helper.
8616 	 */
8617 	if (aconnector->base.funcs->reset)
8618 		aconnector->base.funcs->reset(&aconnector->base);
8619 
8620 	aconnector->connector_id = link_index;
8621 	aconnector->dc_link = link;
8622 	aconnector->base.interlace_allowed = false;
8623 	aconnector->base.doublescan_allowed = false;
8624 	aconnector->base.stereo_allowed = false;
8625 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8626 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8627 	aconnector->audio_inst = -1;
8628 	mutex_init(&aconnector->hpd_lock);
8629 
8630 	/*
8631 	 * Configure HPD hot plug support: connector->polled defaults to 0,
8632 	 * which means HPD hot plug is not supported.
8633 	 */
8634 	switch (connector_type) {
8635 	case DRM_MODE_CONNECTOR_HDMIA:
8636 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8637 		aconnector->base.ycbcr_420_allowed =
8638 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8639 		break;
8640 	case DRM_MODE_CONNECTOR_DisplayPort:
8641 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8642 		link->link_enc = link_enc_cfg_get_link_enc(link);
8643 		ASSERT(link->link_enc);
8644 		if (link->link_enc)
8645 			aconnector->base.ycbcr_420_allowed =
8646 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8647 		break;
8648 	case DRM_MODE_CONNECTOR_DVID:
8649 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8650 		break;
8651 	default:
8652 		break;
8653 	}
8654 
8655 	drm_object_attach_property(&aconnector->base.base,
8656 				dm->ddev->mode_config.scaling_mode_property,
8657 				DRM_MODE_SCALE_NONE);
8658 
8659 	drm_object_attach_property(&aconnector->base.base,
8660 				adev->mode_info.underscan_property,
8661 				UNDERSCAN_OFF);
8662 	drm_object_attach_property(&aconnector->base.base,
8663 				adev->mode_info.underscan_hborder_property,
8664 				0);
8665 	drm_object_attach_property(&aconnector->base.base,
8666 				adev->mode_info.underscan_vborder_property,
8667 				0);
8668 
8669 	if (!aconnector->mst_port)
8670 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8671 
8672 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8673 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8674 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8675 
8676 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8677 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8678 		drm_object_attach_property(&aconnector->base.base,
8679 				adev->mode_info.abm_level_property, 0);
8680 	}
8681 
8682 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8683 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8684 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8685 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8686 
8687 		if (!aconnector->mst_port)
8688 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8689 
8690 #ifdef CONFIG_DRM_AMD_DC_HDCP
8691 		if (adev->dm.hdcp_workqueue)
8692 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8693 #endif
8694 	}
8695 }
8696 
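/*
 * I2C transfers over the DDC hardware owned by DC: each struct i2c_msg below
 * is translated into one struct i2c_payload, and the whole set is submitted
 * as a single i2c_command through dc_submit_i2c() on the link that owns this
 * DDC service.
 */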
8697 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8698 			      struct i2c_msg *msgs, int num)
8699 {
8700 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8701 	struct ddc_service *ddc_service = i2c->ddc_service;
8702 	struct i2c_command cmd;
8703 	int i;
8704 	int result = -EIO;
8705 
8706 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8707 
8708 	if (!cmd.payloads)
8709 		return result;
8710 
8711 	cmd.number_of_payloads = num;
8712 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8713 	cmd.speed = 100;
8714 
8715 	for (i = 0; i < num; i++) {
8716 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8717 		cmd.payloads[i].address = msgs[i].addr;
8718 		cmd.payloads[i].length = msgs[i].len;
8719 		cmd.payloads[i].data = msgs[i].buf;
8720 	}
8721 
8722 	if (dc_submit_i2c(
8723 			ddc_service->ctx->dc,
8724 			ddc_service->link->link_index,
8725 			&cmd))
8726 		result = num;
8727 
8728 	kfree(cmd.payloads);
8729 	return result;
8730 }
8731 
8732 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8733 {
8734 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8735 }
8736 
8737 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8738 	.master_xfer = amdgpu_dm_i2c_xfer,
8739 	.functionality = amdgpu_dm_i2c_func,
8740 };
8741 
8742 static struct amdgpu_i2c_adapter *
8743 create_i2c(struct ddc_service *ddc_service,
8744 	   int link_index,
8745 	   int *res)
8746 {
8747 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8748 	struct amdgpu_i2c_adapter *i2c;
8749 
8750 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8751 	if (!i2c)
8752 		return NULL;
8753 	i2c->base.owner = THIS_MODULE;
8754 	i2c->base.class = I2C_CLASS_DDC;
8755 	i2c->base.dev.parent = &adev->pdev->dev;
8756 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8757 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8758 	i2c_set_adapdata(&i2c->base, i2c);
8759 	i2c->ddc_service = ddc_service;
8760 
8761 	return i2c;
8762 }
8763 
8764 
8765 /*
8766  * Note: this function assumes that dc_link_detect() was called for the
8767  * dc_link which will be represented by this aconnector.
8768  */
8769 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8770 				    struct amdgpu_dm_connector *aconnector,
8771 				    uint32_t link_index,
8772 				    struct amdgpu_encoder *aencoder)
8773 {
8774 	int res = 0;
8775 	int connector_type;
8776 	struct dc *dc = dm->dc;
8777 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8778 	struct amdgpu_i2c_adapter *i2c;
8779 
8780 	link->priv = aconnector;
8781 
8782 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8783 
8784 	i2c = create_i2c(link->ddc, link->link_index, &res);
8785 	if (!i2c) {
8786 		DRM_ERROR("Failed to create i2c adapter data\n");
8787 		return -ENOMEM;
8788 	}
8789 
8790 	aconnector->i2c = i2c;
8791 	res = i2c_add_adapter(&i2c->base);
8792 
8793 	if (res) {
8794 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8795 		goto out_free;
8796 	}
8797 
8798 	connector_type = to_drm_connector_type(link->connector_signal);
8799 
8800 	res = drm_connector_init_with_ddc(
8801 			dm->ddev,
8802 			&aconnector->base,
8803 			&amdgpu_dm_connector_funcs,
8804 			connector_type,
8805 			&i2c->base);
8806 
8807 	if (res) {
8808 		DRM_ERROR("connector_init failed\n");
8809 		aconnector->connector_id = -1;
8810 		goto out_free;
8811 	}
8812 
8813 	drm_connector_helper_add(
8814 			&aconnector->base,
8815 			&amdgpu_dm_connector_helper_funcs);
8816 
8817 	amdgpu_dm_connector_init_helper(
8818 		dm,
8819 		aconnector,
8820 		connector_type,
8821 		link,
8822 		link_index);
8823 
8824 	drm_connector_attach_encoder(
8825 		&aconnector->base, &aencoder->base);
8826 
8827 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8828 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8829 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8830 
8831 out_free:
8832 	if (res) {
8833 		kfree(i2c);
8834 		aconnector->i2c = NULL;
8835 	}
8836 	return res;
8837 }
8838 
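/*
 * The returned mask is used as possible_crtcs for the encoder: it simply has
 * the low adev->mode_info.num_crtc bits set (e.g. 4 CRTCs -> 0xf), capped at
 * the 6-CRTC maximum handled below.
 */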
8839 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8840 {
8841 	switch (adev->mode_info.num_crtc) {
8842 	case 1:
8843 		return 0x1;
8844 	case 2:
8845 		return 0x3;
8846 	case 3:
8847 		return 0x7;
8848 	case 4:
8849 		return 0xf;
8850 	case 5:
8851 		return 0x1f;
8852 	case 6:
8853 	default:
8854 		return 0x3f;
8855 	}
8856 }
8857 
8858 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8859 				  struct amdgpu_encoder *aencoder,
8860 				  uint32_t link_index)
8861 {
8862 	struct amdgpu_device *adev = drm_to_adev(dev);
8863 
8864 	int res = drm_encoder_init(dev,
8865 				   &aencoder->base,
8866 				   &amdgpu_dm_encoder_funcs,
8867 				   DRM_MODE_ENCODER_TMDS,
8868 				   NULL);
8869 
8870 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8871 
8872 	if (!res)
8873 		aencoder->encoder_id = link_index;
8874 	else
8875 		aencoder->encoder_id = -1;
8876 
8877 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8878 
8879 	return res;
8880 }
8881 
8882 static void manage_dm_interrupts(struct amdgpu_device *adev,
8883 				 struct amdgpu_crtc *acrtc,
8884 				 bool enable)
8885 {
8886 	/*
8887 	 * We have no guarantee that the frontend index maps to the same
8888 	 * backend index - some even map to more than one.
8889 	 *
8890 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8891 	 */
8892 	int irq_type =
8893 		amdgpu_display_crtc_idx_to_irq_type(
8894 			adev,
8895 			acrtc->crtc_id);
8896 
8897 	if (enable) {
8898 		drm_crtc_vblank_on(&acrtc->base);
8899 		amdgpu_irq_get(
8900 			adev,
8901 			&adev->pageflip_irq,
8902 			irq_type);
8903 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8904 		amdgpu_irq_get(
8905 			adev,
8906 			&adev->vline0_irq,
8907 			irq_type);
8908 #endif
8909 	} else {
8910 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8911 		amdgpu_irq_put(
8912 			adev,
8913 			&adev->vline0_irq,
8914 			irq_type);
8915 #endif
8916 		amdgpu_irq_put(
8917 			adev,
8918 			&adev->pageflip_irq,
8919 			irq_type);
8920 		drm_crtc_vblank_off(&acrtc->base);
8921 	}
8922 }
8923 
8924 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8925 				      struct amdgpu_crtc *acrtc)
8926 {
8927 	int irq_type =
8928 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8929 
8930 	/*
8931 	 * This reads the current state for the IRQ and forcibly reapplies
8932 	 * the setting to the hardware.
8933 	 */
8934 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8935 }
8936 
8937 static bool
8938 is_scaling_state_different(const struct dm_connector_state *dm_state,
8939 			   const struct dm_connector_state *old_dm_state)
8940 {
8941 	if (dm_state->scaling != old_dm_state->scaling)
8942 		return true;
8943 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8944 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8945 			return true;
8946 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8947 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8948 			return true;
8949 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8950 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8951 		return true;
8952 	return false;
8953 }
8954 
8955 #ifdef CONFIG_DRM_AMD_DC_HDCP
8956 static bool is_content_protection_different(struct drm_connector_state *state,
8957 					    const struct drm_connector_state *old_state,
8958 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8959 {
8960 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8961 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8962 
8963 	/* Handle: Type0/1 change */
8964 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8965 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8966 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8967 		return true;
8968 	}
8969 
8970 	/* CP is being re-enabled, ignore this
8971 	 *
8972 	 * Handles:	ENABLED -> DESIRED
8973 	 */
8974 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8975 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8976 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8977 		return false;
8978 	}
8979 
8980 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8981 	 *
8982 	 * Handles:	UNDESIRED -> ENABLED
8983 	 */
8984 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8985 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8986 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8987 
8988 	/* Stream removed and re-enabled
8989 	 *
8990 	 * Can sometimes overlap with the HPD case,
8991 	 * thus set update_hdcp to false to avoid
8992 	 * setting HDCP multiple times.
8993 	 *
8994 	 * Handles:	DESIRED -> DESIRED (Special case)
8995 	 */
8996 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8997 		state->crtc && state->crtc->enabled &&
8998 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8999 		dm_con_state->update_hdcp = false;
9000 		return true;
9001 	}
9002 
9003 	/* Hot-plug, headless s3, dpms
9004 	 *
9005 	 * Only start HDCP if the display is connected/enabled.
9006 	 * update_hdcp flag will be set to false until the next
9007 	 * HPD comes in.
9008 	 *
9009 	 * Handles:	DESIRED -> DESIRED (Special case)
9010 	 */
9011 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
9012 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
9013 		dm_con_state->update_hdcp = false;
9014 		return true;
9015 	}
9016 
9017 	/*
9018 	 * Handles:	UNDESIRED -> UNDESIRED
9019 	 *		DESIRED -> DESIRED
9020 	 *		ENABLED -> ENABLED
9021 	 */
9022 	if (old_state->content_protection == state->content_protection)
9023 		return false;
9024 
9025 	/*
9026 	 * Handles:	UNDESIRED -> DESIRED
9027 	 *		DESIRED -> UNDESIRED
9028 	 *		ENABLED -> UNDESIRED
9029 	 */
9030 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
9031 		return true;
9032 
9033 	/*
9034 	 * Handles:	DESIRED -> ENABLED
9035 	 */
9036 	return false;
9037 }
9038 
9039 #endif
9040 static void remove_stream(struct amdgpu_device *adev,
9041 			  struct amdgpu_crtc *acrtc,
9042 			  struct dc_stream_state *stream)
9043 {
9044 	/* this is the update mode case */
9045 
9046 	acrtc->otg_inst = -1;
9047 	acrtc->enabled = false;
9048 }
9049 
9050 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
9051 			       struct dc_cursor_position *position)
9052 {
9053 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9054 	int x, y;
9055 	int xorigin = 0, yorigin = 0;
9056 
9057 	if (!crtc || !plane->state->fb)
9058 		return 0;
9059 
9060 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
9061 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
9062 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
9063 			  __func__,
9064 			  plane->state->crtc_w,
9065 			  plane->state->crtc_h);
9066 		return -EINVAL;
9067 	}
9068 
9069 	x = plane->state->crtc_x;
9070 	y = plane->state->crtc_y;
9071 
9072 	if (x <= -amdgpu_crtc->max_cursor_width ||
9073 	    y <= -amdgpu_crtc->max_cursor_height)
9074 		return 0;
9075 
9076 	if (x < 0) {
9077 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9078 		x = 0;
9079 	}
9080 	if (y < 0) {
9081 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9082 		y = 0;
9083 	}
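	/*
	 * Example with made-up numbers: a cursor placed at crtc_x = -10 is
	 * programmed at x = 0 with x_hotspot = 10, so the hardware shifts the
	 * image left by 10 pixels and the cursor appears partially off the
	 * left edge of the screen.
	 */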
9084 	position->enable = true;
9085 	position->translate_by_source = true;
9086 	position->x = x;
9087 	position->y = y;
9088 	position->x_hotspot = xorigin;
9089 	position->y_hotspot = yorigin;
9090 
9091 	return 0;
9092 }
9093 
9094 static void handle_cursor_update(struct drm_plane *plane,
9095 				 struct drm_plane_state *old_plane_state)
9096 {
9097 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
9098 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9099 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9100 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9101 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9102 	uint64_t address = afb ? afb->address : 0;
9103 	struct dc_cursor_position position = {0};
9104 	struct dc_cursor_attributes attributes;
9105 	int ret;
9106 
9107 	if (!plane->state->fb && !old_plane_state->fb)
9108 		return;
9109 
9110 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
9111 		      __func__,
9112 		      amdgpu_crtc->crtc_id,
9113 		      plane->state->crtc_w,
9114 		      plane->state->crtc_h);
9115 
9116 	ret = get_cursor_position(plane, crtc, &position);
9117 	if (ret)
9118 		return;
9119 
9120 	if (!position.enable) {
9121 		/* turn off cursor */
9122 		if (crtc_state && crtc_state->stream) {
9123 			mutex_lock(&adev->dm.dc_lock);
9124 			dc_stream_set_cursor_position(crtc_state->stream,
9125 						      &position);
9126 			mutex_unlock(&adev->dm.dc_lock);
9127 		}
9128 		return;
9129 	}
9130 
9131 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
9132 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
9133 
9134 	memset(&attributes, 0, sizeof(attributes));
9135 	attributes.address.high_part = upper_32_bits(address);
9136 	attributes.address.low_part  = lower_32_bits(address);
9137 	attributes.width             = plane->state->crtc_w;
9138 	attributes.height            = plane->state->crtc_h;
9139 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9140 	attributes.rotation_angle    = 0;
9141 	attributes.attribute_flags.value = 0;
9142 
9143 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
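	/*
	 * The pitch is handed to DC in pixels: the byte stride is divided by
	 * bytes-per-pixel, so e.g. a 64-pixel-wide ARGB8888 cursor with a
	 * 256-byte stride yields a pitch of 64.
	 */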
9144 
9145 	if (crtc_state->stream) {
9146 		mutex_lock(&adev->dm.dc_lock);
9147 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9148 							 &attributes))
9149 			DRM_ERROR("DC failed to set cursor attributes\n");
9150 
9151 		if (!dc_stream_set_cursor_position(crtc_state->stream,
9152 						   &position))
9153 			DRM_ERROR("DC failed to set cursor position\n");
9154 		mutex_unlock(&adev->dm.dc_lock);
9155 	}
9156 }
9157 
9158 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9159 {
9160 
9161 	assert_spin_locked(&acrtc->base.dev->event_lock);
9162 	WARN_ON(acrtc->event);
9163 
9164 	acrtc->event = acrtc->base.state->event;
9165 
9166 	/* Set the flip status */
9167 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9168 
9169 	/* Mark this event as consumed */
9170 	acrtc->base.state->event = NULL;
9171 
9172 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9173 		     acrtc->crtc_id);
9174 }
9175 
9176 static void update_freesync_state_on_stream(
9177 	struct amdgpu_display_manager *dm,
9178 	struct dm_crtc_state *new_crtc_state,
9179 	struct dc_stream_state *new_stream,
9180 	struct dc_plane_state *surface,
9181 	u32 flip_timestamp_in_us)
9182 {
9183 	struct mod_vrr_params vrr_params;
9184 	struct dc_info_packet vrr_infopacket = {0};
9185 	struct amdgpu_device *adev = dm->adev;
9186 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9187 	unsigned long flags;
9188 	bool pack_sdp_v1_3 = false;
9189 
9190 	if (!new_stream)
9191 		return;
9192 
9193 	/*
9194 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9195 	 * For now it's sufficient to just guard against these conditions.
9196 	 */
9197 
9198 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9199 		return;
9200 
9201 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9202 	vrr_params = acrtc->dm_irq_params.vrr_params;
9203 
9204 	if (surface) {
9205 		mod_freesync_handle_preflip(
9206 			dm->freesync_module,
9207 			surface,
9208 			new_stream,
9209 			flip_timestamp_in_us,
9210 			&vrr_params);
9211 
9212 		if (adev->family < AMDGPU_FAMILY_AI &&
9213 		    amdgpu_dm_vrr_active(new_crtc_state)) {
9214 			mod_freesync_handle_v_update(dm->freesync_module,
9215 						     new_stream, &vrr_params);
9216 
9217 			/* Need to call this before the frame ends. */
9218 			dc_stream_adjust_vmin_vmax(dm->dc,
9219 						   new_crtc_state->stream,
9220 						   &vrr_params.adjust);
9221 		}
9222 	}
9223 
9224 	mod_freesync_build_vrr_infopacket(
9225 		dm->freesync_module,
9226 		new_stream,
9227 		&vrr_params,
9228 		PACKET_TYPE_VRR,
9229 		TRANSFER_FUNC_UNKNOWN,
9230 		&vrr_infopacket,
9231 		pack_sdp_v1_3);
9232 
9233 	new_crtc_state->freesync_timing_changed |=
9234 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9235 			&vrr_params.adjust,
9236 			sizeof(vrr_params.adjust)) != 0);
9237 
9238 	new_crtc_state->freesync_vrr_info_changed |=
9239 		(memcmp(&new_crtc_state->vrr_infopacket,
9240 			&vrr_infopacket,
9241 			sizeof(vrr_infopacket)) != 0);
9242 
9243 	acrtc->dm_irq_params.vrr_params = vrr_params;
9244 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9245 
9246 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9247 	new_stream->vrr_infopacket = vrr_infopacket;
9248 
9249 	if (new_crtc_state->freesync_vrr_info_changed)
9250 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9251 			      new_crtc_state->base.crtc->base.id,
9252 			      (int)new_crtc_state->base.vrr_enabled,
9253 			      (int)vrr_params.state);
9254 
9255 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9256 }
9257 
9258 static void update_stream_irq_parameters(
9259 	struct amdgpu_display_manager *dm,
9260 	struct dm_crtc_state *new_crtc_state)
9261 {
9262 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9263 	struct mod_vrr_params vrr_params;
9264 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9265 	struct amdgpu_device *adev = dm->adev;
9266 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9267 	unsigned long flags;
9268 
9269 	if (!new_stream)
9270 		return;
9271 
9272 	/*
9273 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9274 	 * For now it's sufficient to just guard against these conditions.
9275 	 */
9276 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9277 		return;
9278 
9279 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9280 	vrr_params = acrtc->dm_irq_params.vrr_params;
9281 
9282 	if (new_crtc_state->vrr_supported &&
9283 	    config.min_refresh_in_uhz &&
9284 	    config.max_refresh_in_uhz) {
9285 		/*
9286 		 * if freesync compatible mode was set, config.state will be set
9287 		 * in atomic check
9288 		 */
9289 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9290 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9291 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9292 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9293 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9294 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9295 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9296 		} else {
9297 			config.state = new_crtc_state->base.vrr_enabled ?
9298 						     VRR_STATE_ACTIVE_VARIABLE :
9299 						     VRR_STATE_INACTIVE;
9300 		}
9301 	} else {
9302 		config.state = VRR_STATE_UNSUPPORTED;
9303 	}
9304 
9305 	mod_freesync_build_vrr_params(dm->freesync_module,
9306 				      new_stream,
9307 				      &config, &vrr_params);
9308 
9309 	new_crtc_state->freesync_timing_changed |=
9310 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9311 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9312 
9313 	new_crtc_state->freesync_config = config;
9314 	/* Copy state for access from DM IRQ handler */
9315 	acrtc->dm_irq_params.freesync_config = config;
9316 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9317 	acrtc->dm_irq_params.vrr_params = vrr_params;
9318 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9319 }
9320 
9321 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9322 					    struct dm_crtc_state *new_state)
9323 {
9324 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9325 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9326 
9327 	if (!old_vrr_active && new_vrr_active) {
9328 		/* Transition VRR inactive -> active:
9329 		 * While VRR is active, we must not disable the vblank irq, as a
9330 		 * re-enable after a disable would compute bogus vblank/pflip
9331 		 * timestamps if it happened inside the display front porch.
9332 		 *
9333 		 * We also need vupdate irq for the actual core vblank handling
9334 		 * at end of vblank.
9335 		 */
9336 		dm_set_vupdate_irq(new_state->base.crtc, true);
9337 		drm_crtc_vblank_get(new_state->base.crtc);
9338 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9339 				 __func__, new_state->base.crtc->base.id);
9340 	} else if (old_vrr_active && !new_vrr_active) {
9341 		/* Transition VRR active -> inactive:
9342 		 * Allow vblank irq disable again for fixed refresh rate.
9343 		 */
9344 		dm_set_vupdate_irq(new_state->base.crtc, false);
9345 		drm_crtc_vblank_put(new_state->base.crtc);
9346 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9347 				 __func__, new_state->base.crtc->base.id);
9348 	}
9349 }
9350 
9351 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9352 {
9353 	struct drm_plane *plane;
9354 	struct drm_plane_state *old_plane_state;
9355 	int i;
9356 
9357 	/*
9358 	 * TODO: Make this per-stream so we don't issue redundant updates for
9359 	 * commits with multiple streams.
9360 	 */
9361 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9362 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9363 			handle_cursor_update(plane, old_plane_state);
9364 }
9365 
9366 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9367 				    struct dc_state *dc_state,
9368 				    struct drm_device *dev,
9369 				    struct amdgpu_display_manager *dm,
9370 				    struct drm_crtc *pcrtc,
9371 				    bool wait_for_vblank)
9372 {
9373 	uint32_t i;
9374 	uint64_t timestamp_ns;
9375 	struct drm_plane *plane;
9376 	struct drm_plane_state *old_plane_state, *new_plane_state;
9377 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9378 	struct drm_crtc_state *new_pcrtc_state =
9379 			drm_atomic_get_new_crtc_state(state, pcrtc);
9380 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9381 	struct dm_crtc_state *dm_old_crtc_state =
9382 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9383 	int planes_count = 0, vpos, hpos;
9384 	long r;
9385 	unsigned long flags;
9386 	struct amdgpu_bo *abo;
9387 	uint32_t target_vblank, last_flip_vblank;
9388 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9389 	bool pflip_present = false;
9390 	struct {
9391 		struct dc_surface_update surface_updates[MAX_SURFACES];
9392 		struct dc_plane_info plane_infos[MAX_SURFACES];
9393 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9394 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9395 		struct dc_stream_update stream_update;
9396 	} *bundle;
9397 
9398 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9399 
9400 	if (!bundle) {
9401 		dm_error("Failed to allocate update bundle\n");
9402 		goto cleanup;
9403 	}
9404 
9405 	/*
9406 	 * Disable the cursor first if we're disabling all the planes.
9407 	 * It'll remain on the screen after the planes are re-enabled
9408 	 * if we don't.
9409 	 */
9410 	if (acrtc_state->active_planes == 0)
9411 		amdgpu_dm_commit_cursors(state);
9412 
9413 	/* update planes when needed */
9414 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9415 		struct drm_crtc *crtc = new_plane_state->crtc;
9416 		struct drm_crtc_state *new_crtc_state;
9417 		struct drm_framebuffer *fb = new_plane_state->fb;
9418 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9419 		bool plane_needs_flip;
9420 		struct dc_plane_state *dc_plane;
9421 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9422 
9423 		/* Cursor plane is handled after stream updates */
9424 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9425 			continue;
9426 
9427 		if (!fb || !crtc || pcrtc != crtc)
9428 			continue;
9429 
9430 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9431 		if (!new_crtc_state->active)
9432 			continue;
9433 
9434 		dc_plane = dm_new_plane_state->dc_state;
9435 
9436 		bundle->surface_updates[planes_count].surface = dc_plane;
9437 		if (new_pcrtc_state->color_mgmt_changed) {
9438 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9439 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9440 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9441 		}
9442 
9443 		fill_dc_scaling_info(dm->adev, new_plane_state,
9444 				     &bundle->scaling_infos[planes_count]);
9445 
9446 		bundle->surface_updates[planes_count].scaling_info =
9447 			&bundle->scaling_infos[planes_count];
9448 
9449 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9450 
9451 		pflip_present = pflip_present || plane_needs_flip;
9452 
9453 		if (!plane_needs_flip) {
9454 			planes_count += 1;
9455 			continue;
9456 		}
9457 
9458 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9459 
9460 		/*
9461 		 * Wait for all fences on this FB. Do limited wait to avoid
9462 		 * deadlock during GPU reset when this fence will not signal
9463 		 * but we hold reservation lock for the BO.
9464 		 */
9465 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9466 					  DMA_RESV_USAGE_WRITE, false,
9467 					  msecs_to_jiffies(5000));
9468 		if (unlikely(r <= 0))
9469 			DRM_ERROR("Waiting for fences timed out!");
9470 
9471 		fill_dc_plane_info_and_addr(
9472 			dm->adev, new_plane_state,
9473 			afb->tiling_flags,
9474 			&bundle->plane_infos[planes_count],
9475 			&bundle->flip_addrs[planes_count].address,
9476 			afb->tmz_surface, false);
9477 
9478 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9479 				 new_plane_state->plane->index,
9480 				 bundle->plane_infos[planes_count].dcc.enable);
9481 
9482 		bundle->surface_updates[planes_count].plane_info =
9483 			&bundle->plane_infos[planes_count];
9484 
9485 		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9486 				    new_crtc_state,
9487 				    &bundle->flip_addrs[planes_count]);
9488 
9489 		/*
9490 		 * Only allow immediate flips for fast updates that don't
9491 		 * change FB pitch, DCC state, rotation or mirroring.
9492 		 */
9493 		bundle->flip_addrs[planes_count].flip_immediate =
9494 			crtc->state->async_flip &&
9495 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9496 
9497 		timestamp_ns = ktime_get_ns();
9498 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9499 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9500 		bundle->surface_updates[planes_count].surface = dc_plane;
9501 
9502 		if (!bundle->surface_updates[planes_count].surface) {
9503 			DRM_ERROR("No surface for CRTC: id=%d\n",
9504 					acrtc_attach->crtc_id);
9505 			continue;
9506 		}
9507 
9508 		if (plane == pcrtc->primary)
9509 			update_freesync_state_on_stream(
9510 				dm,
9511 				acrtc_state,
9512 				acrtc_state->stream,
9513 				dc_plane,
9514 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9515 
9516 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9517 				 __func__,
9518 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9519 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9520 
9521 		planes_count += 1;
9522 
9523 	}
9524 
9525 	if (pflip_present) {
9526 		if (!vrr_active) {
9527 			/* Use old throttling in non-vrr fixed refresh rate mode
9528 			 * to keep flip scheduling based on target vblank counts
9529 			 * working in a backwards compatible way, e.g., for
9530 			 * clients using the GLX_OML_sync_control extension or
9531 			 * DRI3/Present extension with defined target_msc.
9532 			 */
9533 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9534 		}
9535 		else {
9536 			/* For variable refresh rate mode only:
9537 			 * Get vblank of last completed flip to avoid > 1 vrr
9538 			 * flips per video frame by use of throttling, but allow
9539 			 * flip programming anywhere in the possibly large
9540 			 * variable vrr vblank interval for fine-grained flip
9541 			 * timing control and more opportunity to avoid stutter
9542 			 * on late submission of flips.
9543 			 */
9544 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9545 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9546 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9547 		}
9548 
9549 		target_vblank = last_flip_vblank + wait_for_vblank;
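		/*
		 * wait_for_vblank is 0 or 1 here, so the target is either the
		 * vblank of the last completed flip or the one right after it.
		 */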
9550 
9551 		/*
9552 		 * Wait until we're out of the vertical blank period before the one
9553 		 * targeted by the flip
9554 		 */
9555 		while ((acrtc_attach->enabled &&
9556 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9557 							    0, &vpos, &hpos, NULL,
9558 							    NULL, &pcrtc->hwmode)
9559 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9560 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9561 			(int)(target_vblank -
9562 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9563 			usleep_range(1000, 1100);
9564 		}
9565 
9566 		/*
9567 		 * Prepare the flip event for the pageflip interrupt to handle.
9568 		 *
9569 		 * This only works in the case where we've already turned on the
9570 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9571 		 * from 0 -> n planes we have to skip a hardware generated event
9572 		 * and rely on sending it from software.
9573 		 */
9574 		if (acrtc_attach->base.state->event &&
9575 		    acrtc_state->active_planes > 0) {
9576 			drm_crtc_vblank_get(pcrtc);
9577 
9578 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9579 
9580 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9581 			prepare_flip_isr(acrtc_attach);
9582 
9583 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9584 		}
9585 
9586 		if (acrtc_state->stream) {
9587 			if (acrtc_state->freesync_vrr_info_changed)
9588 				bundle->stream_update.vrr_infopacket =
9589 					&acrtc_state->stream->vrr_infopacket;
9590 		}
9591 	}
9592 
9593 	/* Update the planes if changed or disable if we don't have any. */
9594 	if ((planes_count || acrtc_state->active_planes == 0) &&
9595 		acrtc_state->stream) {
9596 		/*
9597 		 * If PSR or idle optimizations are enabled then flush out
9598 		 * any pending work before hardware programming.
9599 		 */
9600 		if (dm->vblank_control_workqueue)
9601 			flush_workqueue(dm->vblank_control_workqueue);
9602 
9603 		bundle->stream_update.stream = acrtc_state->stream;
9604 		if (new_pcrtc_state->mode_changed) {
9605 			bundle->stream_update.src = acrtc_state->stream->src;
9606 			bundle->stream_update.dst = acrtc_state->stream->dst;
9607 		}
9608 
9609 		if (new_pcrtc_state->color_mgmt_changed) {
9610 			/*
9611 			 * TODO: This isn't fully correct since we've actually
9612 			 * already modified the stream in place.
9613 			 */
9614 			bundle->stream_update.gamut_remap =
9615 				&acrtc_state->stream->gamut_remap_matrix;
9616 			bundle->stream_update.output_csc_transform =
9617 				&acrtc_state->stream->csc_color_matrix;
9618 			bundle->stream_update.out_transfer_func =
9619 				acrtc_state->stream->out_transfer_func;
9620 		}
9621 
9622 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9623 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9624 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9625 
9626 		/*
9627 		 * If FreeSync state on the stream has changed then we need to
9628 		 * re-adjust the min/max bounds now that DC doesn't handle this
9629 		 * as part of commit.
9630 		 */
9631 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9632 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9633 			dc_stream_adjust_vmin_vmax(
9634 				dm->dc, acrtc_state->stream,
9635 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9636 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9637 		}
9638 		mutex_lock(&dm->dc_lock);
9639 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9640 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9641 			amdgpu_dm_psr_disable(acrtc_state->stream);
9642 
9643 		dc_commit_updates_for_stream(dm->dc,
9644 						     bundle->surface_updates,
9645 						     planes_count,
9646 						     acrtc_state->stream,
9647 						     &bundle->stream_update,
9648 						     dc_state);
9649 
9650 		/*
9651 		 * Enable or disable the interrupts on the backend.
9652 		 *
9653 		 * Most pipes are put into power gating when unused.
9654 		 *
9655 		 * When power gating is enabled on a pipe we lose the
9656 		 * interrupt enablement state when power gating is disabled.
9657 		 *
9658 		 * So we need to update the IRQ control state in hardware
9659 		 * whenever the pipe turns on (since it could be previously
9660 		 * power gated) or off (since some pipes can't be power gated
9661 		 * on some ASICs).
9662 		 */
9663 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9664 			dm_update_pflip_irq_state(drm_to_adev(dev),
9665 						  acrtc_attach);
9666 
9667 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9668 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9669 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9670 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9671 
9672 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9673 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9674 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9675 			struct amdgpu_dm_connector *aconn =
9676 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9677 
9678 			if (aconn->psr_skip_count > 0)
9679 				aconn->psr_skip_count--;
9680 
9681 			/* Allow PSR when skip count is 0. */
9682 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9683 
9684 			/*
9685 			 * If sink supports PSR SU, there is no need to rely on
9686 			 * a vblank event disable request to enable PSR. PSR SU
9687 			 * can be enabled immediately once OS demonstrates an
9688 			 * adequate number of fast atomic commits to notify KMD
9689 			 * of update events. See `vblank_control_worker()`.
9690 			 */
9691 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9692 			    acrtc_attach->dm_irq_params.allow_psr_entry &&
9693 			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
9694 				amdgpu_dm_psr_enable(acrtc_state->stream);
9695 		} else {
9696 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9697 		}
9698 
9699 		mutex_unlock(&dm->dc_lock);
9700 	}
9701 
9702 	/*
9703 	 * Update cursor state *after* programming all the planes.
9704 	 * This avoids redundant programming in the case where we're going
9705 	 * to be disabling a single plane - those pipes are being disabled.
9706 	 */
9707 	if (acrtc_state->active_planes)
9708 		amdgpu_dm_commit_cursors(state);
9709 
9710 cleanup:
9711 	kfree(bundle);
9712 }
9713 
9714 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9715 				   struct drm_atomic_state *state)
9716 {
9717 	struct amdgpu_device *adev = drm_to_adev(dev);
9718 	struct amdgpu_dm_connector *aconnector;
9719 	struct drm_connector *connector;
9720 	struct drm_connector_state *old_con_state, *new_con_state;
9721 	struct drm_crtc_state *new_crtc_state;
9722 	struct dm_crtc_state *new_dm_crtc_state;
9723 	const struct dc_stream_status *status;
9724 	int i, inst;
9725 
9726 	/* Notify device removals. */
9727 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9728 		if (old_con_state->crtc != new_con_state->crtc) {
9729 			/* CRTC changes require notification. */
9730 			goto notify;
9731 		}
9732 
9733 		if (!new_con_state->crtc)
9734 			continue;
9735 
9736 		new_crtc_state = drm_atomic_get_new_crtc_state(
9737 			state, new_con_state->crtc);
9738 
9739 		if (!new_crtc_state)
9740 			continue;
9741 
9742 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9743 			continue;
9744 
9745 	notify:
9746 		aconnector = to_amdgpu_dm_connector(connector);
9747 
9748 		mutex_lock(&adev->dm.audio_lock);
9749 		inst = aconnector->audio_inst;
9750 		aconnector->audio_inst = -1;
9751 		mutex_unlock(&adev->dm.audio_lock);
9752 
9753 		amdgpu_dm_audio_eld_notify(adev, inst);
9754 	}
9755 
9756 	/* Notify audio device additions. */
9757 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9758 		if (!new_con_state->crtc)
9759 			continue;
9760 
9761 		new_crtc_state = drm_atomic_get_new_crtc_state(
9762 			state, new_con_state->crtc);
9763 
9764 		if (!new_crtc_state)
9765 			continue;
9766 
9767 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9768 			continue;
9769 
9770 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9771 		if (!new_dm_crtc_state->stream)
9772 			continue;
9773 
9774 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9775 		if (!status)
9776 			continue;
9777 
9778 		aconnector = to_amdgpu_dm_connector(connector);
9779 
9780 		mutex_lock(&adev->dm.audio_lock);
9781 		inst = status->audio_inst;
9782 		aconnector->audio_inst = inst;
9783 		mutex_unlock(&adev->dm.audio_lock);
9784 
9785 		amdgpu_dm_audio_eld_notify(adev, inst);
9786 	}
9787 }
9788 
9789 /**
9790  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9791  * @crtc_state: the DRM CRTC state
9792  * @stream_state: the DC stream state.
9793  *
9794  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9795  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9796  */
9797 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9798 						struct dc_stream_state *stream_state)
9799 {
9800 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9801 }
9802 
9803 /**
9804  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9805  * @state: The atomic state to commit
9806  *
9807  * This will tell DC to commit the constructed DC state from atomic_check,
9808  * programming the hardware. Any failures here implies a hardware failure, since
9809  * programming the hardware. Any failure here implies a hardware failure, since
9810  */
9811 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9812 {
9813 	struct drm_device *dev = state->dev;
9814 	struct amdgpu_device *adev = drm_to_adev(dev);
9815 	struct amdgpu_display_manager *dm = &adev->dm;
9816 	struct dm_atomic_state *dm_state;
9817 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9818 	uint32_t i, j;
9819 	struct drm_crtc *crtc;
9820 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9821 	unsigned long flags;
9822 	bool wait_for_vblank = true;
9823 	struct drm_connector *connector;
9824 	struct drm_connector_state *old_con_state, *new_con_state;
9825 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9826 	int crtc_disable_count = 0;
9827 	bool mode_set_reset_required = false;
9828 
9829 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9830 
9831 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9832 
9833 	dm_state = dm_atomic_get_new_state(state);
9834 	if (dm_state && dm_state->context) {
9835 		dc_state = dm_state->context;
9836 	} else {
9837 		/* No state changes, retain current state. */
9838 		dc_state_temp = dc_create_state(dm->dc);
9839 		ASSERT(dc_state_temp);
9840 		dc_state = dc_state_temp;
9841 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9842 	}
9843 
9844 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9845 				       new_crtc_state, i) {
9846 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9847 
9848 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9849 
9850 		if (old_crtc_state->active &&
9851 		    (!new_crtc_state->active ||
9852 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9853 			manage_dm_interrupts(adev, acrtc, false);
9854 			dc_stream_release(dm_old_crtc_state->stream);
9855 		}
9856 	}
9857 
9858 	drm_atomic_helper_calc_timestamping_constants(state);
9859 
9860 	/* update changed items */
9861 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9862 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9863 
9864 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9865 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9866 
9867 		drm_dbg_state(state->dev,
9868 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9869 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9870 			"connectors_changed:%d\n",
9871 			acrtc->crtc_id,
9872 			new_crtc_state->enable,
9873 			new_crtc_state->active,
9874 			new_crtc_state->planes_changed,
9875 			new_crtc_state->mode_changed,
9876 			new_crtc_state->active_changed,
9877 			new_crtc_state->connectors_changed);
9878 
9879 		/* Disable cursor if disabling crtc */
9880 		if (old_crtc_state->active && !new_crtc_state->active) {
9881 			struct dc_cursor_position position;
9882 
9883 			memset(&position, 0, sizeof(position));
9884 			mutex_lock(&dm->dc_lock);
9885 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9886 			mutex_unlock(&dm->dc_lock);
9887 		}
9888 
9889 		/* Copy all transient state flags into dc state */
9890 		if (dm_new_crtc_state->stream) {
9891 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9892 							    dm_new_crtc_state->stream);
9893 		}
9894 
9895 		/* handles headless hotplug case, updating new_state and
9896 		 * aconnector as needed
9897 		 */
9898 
9899 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9900 
9901 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9902 
9903 			if (!dm_new_crtc_state->stream) {
9904 				/*
9905 				 * This can happen because of issues with the
9906 				 * delivery of userspace notifications: userspace
9907 				 * tries to set a mode on a display that is in
9908 				 * fact disconnected, so dc_sink is NULL on the
9909 				 * aconnector. We expect a mode reset to come
9910 				 * soon.
9911 				 *
9912 				 * It can also happen when an unplug occurs while
9913 				 * the resume sequence is still completing.
9914 				 *
9915 				 * In either case, we want to pretend we still
9916 				 * have a sink to keep the pipe running so that
9917 				 * hw state stays consistent with the sw state.
9918 				 */
9919 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9920 						__func__, acrtc->base.base.id);
9921 				continue;
9922 			}
9923 
9924 			if (dm_old_crtc_state->stream)
9925 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9926 
9927 			pm_runtime_get_noresume(dev->dev);
9928 
9929 			acrtc->enabled = true;
9930 			acrtc->hw_mode = new_crtc_state->mode;
9931 			crtc->hwmode = new_crtc_state->mode;
9932 			mode_set_reset_required = true;
9933 		} else if (modereset_required(new_crtc_state)) {
9934 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9935 			/* i.e. reset mode */
9936 			if (dm_old_crtc_state->stream)
9937 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9938 
9939 			mode_set_reset_required = true;
9940 		}
9941 	} /* for_each_crtc_in_state() */
9942 
9943 	if (dc_state) {
9944 		/* If there was a mode set or reset, disable eDP PSR */
9945 		if (mode_set_reset_required) {
9946 			if (dm->vblank_control_workqueue)
9947 				flush_workqueue(dm->vblank_control_workqueue);
9948 
9949 			amdgpu_dm_psr_disable_all(dm);
9950 		}
9951 
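		/*
		 * Hand the constructed dc_state to DC: dc_commit_state()
		 * programs the full stream/plane configuration into hardware
		 * in one shot.
		 */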
9952 		dm_enable_per_frame_crtc_master_sync(dc_state);
9953 		mutex_lock(&dm->dc_lock);
9954 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9955 
9956 		/* Allow idle optimization when vblank count is 0 for display off */
9957 		if (dm->active_vblank_irq_count == 0)
9958 			dc_allow_idle_optimizations(dm->dc, true);
9959 		mutex_unlock(&dm->dc_lock);
9960 	}
9961 
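	/*
	 * Cache the OTG (output timing generator) instance that DC assigned to
	 * each active stream on the corresponding amdgpu_crtc.
	 */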
9962 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9963 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9964 
9965 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9966 
9967 		if (dm_new_crtc_state->stream != NULL) {
9968 			const struct dc_stream_status *status =
9969 					dc_stream_get_status(dm_new_crtc_state->stream);
9970 
9971 			if (!status)
9972 				status = dc_stream_get_status_from_state(dc_state,
9973 									 dm_new_crtc_state->stream);
9974 			if (!status)
9975 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9976 			else
9977 				acrtc->otg_inst = status->primary_otg_inst;
9978 		}
9979 	}
9980 #ifdef CONFIG_DRM_AMD_DC_HDCP
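	/*
	 * If a connector's CRTC lost its stream while content protection was
	 * enabled, reset HDCP for that link and fall back to DESIRED so it can
	 * be re-enabled later; otherwise propagate any content protection
	 * changes to the HDCP workqueue.
	 */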
9981 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9982 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9983 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9984 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9985 
9986 		new_crtc_state = NULL;
9987 
9988 		if (acrtc)
9989 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9990 
9991 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9992 
9993 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9994 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9995 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9996 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9997 			dm_new_con_state->update_hdcp = true;
9998 			continue;
9999 		}
10000 
10001 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
10002 			hdcp_update_display(
10003 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10004 				new_con_state->hdcp_content_type,
10005 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
10006 	}
10007 #endif
10008 
10009 	/* Handle connector state changes */
10010 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10011 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10012 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10013 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10014 		struct dc_surface_update dummy_updates[MAX_SURFACES];
10015 		struct dc_stream_update stream_update;
10016 		struct dc_info_packet hdr_packet;
10017 		struct dc_stream_status *status = NULL;
10018 		bool abm_changed, hdr_changed, scaling_changed;
10019 
10020 		memset(&dummy_updates, 0, sizeof(dummy_updates));
10021 		memset(&stream_update, 0, sizeof(stream_update));
10022 
10023 		if (acrtc) {
10024 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10025 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10026 		}
10027 
10028 		/* Skip any modesets/resets */
10029 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10030 			continue;
10031 
10032 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10033 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10034 
10035 		scaling_changed = is_scaling_state_different(dm_new_con_state,
10036 							     dm_old_con_state);
10037 
10038 		abm_changed = dm_new_crtc_state->abm_level !=
10039 			      dm_old_crtc_state->abm_level;
10040 
10041 		hdr_changed =
10042 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10043 
10044 		if (!scaling_changed && !abm_changed && !hdr_changed)
10045 			continue;
10046 
10047 		stream_update.stream = dm_new_crtc_state->stream;
10048 		if (scaling_changed) {
10049 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10050 					dm_new_con_state, dm_new_crtc_state->stream);
10051 
10052 			stream_update.src = dm_new_crtc_state->stream->src;
10053 			stream_update.dst = dm_new_crtc_state->stream->dst;
10054 		}
10055 
10056 		if (abm_changed) {
10057 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10058 
10059 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
10060 		}
10061 
10062 		if (hdr_changed) {
10063 			fill_hdr_info_packet(new_con_state, &hdr_packet);
10064 			stream_update.hdr_static_metadata = &hdr_packet;
10065 		}
10066 
10067 		status = dc_stream_get_status(dm_new_crtc_state->stream);
10068 
10069 		if (WARN_ON(!status))
10070 			continue;
10071 
10072 		WARN_ON(!status->plane_count);
10073 
10074 		/*
10075 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10076 		 * Here we create an empty update on each plane.
10077 		 * To fix this, DC should permit updating only stream properties.
10078 		 */
10079 		for (j = 0; j < status->plane_count; j++)
10080 			dummy_updates[j].surface = status->plane_states[0];
10081 
10082 
10083 		mutex_lock(&dm->dc_lock);
10084 		dc_commit_updates_for_stream(dm->dc,
10085 						     dummy_updates,
10086 						     status->plane_count,
10087 						     dm_new_crtc_state->stream,
10088 						     &stream_update,
10089 						     dc_state);
10090 		mutex_unlock(&dm->dc_lock);
10091 	}
10092 
10093 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
10094 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10095 				      new_crtc_state, i) {
10096 		if (old_crtc_state->active && !new_crtc_state->active)
10097 			crtc_disable_count++;
10098 
10099 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10100 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10101 
10102 		/* For freesync config update on crtc state and params for irq */
10103 		update_stream_irq_parameters(dm, dm_new_crtc_state);
10104 
10105 		/* Handle vrr on->off / off->on transitions */
10106 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10107 						dm_new_crtc_state);
10108 	}
10109 
10110 	/**
10111 	 * Enable interrupts for CRTCs that are newly enabled or went through
10112 	 * a modeset. It was intentionally deferred until after the front end
10113 	 * state was modified to wait until the OTG was on and so the IRQ
10114 	 * handlers didn't access stale or invalid state.
10115 	 */
10116 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10117 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10118 #ifdef CONFIG_DEBUG_FS
10119 		bool configure_crc = false;
10120 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
10121 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10122 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10123 #endif
10124 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10125 		cur_crc_src = acrtc->dm_irq_params.crc_src;
10126 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10127 #endif
10128 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10129 
10130 		if (new_crtc_state->active &&
10131 		    (!old_crtc_state->active ||
10132 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10133 			dc_stream_retain(dm_new_crtc_state->stream);
10134 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10135 			manage_dm_interrupts(adev, acrtc, true);
10136 
10137 #ifdef CONFIG_DEBUG_FS
10138 			/**
10139 			 * Frontend may have changed so reapply the CRC capture
10140 			 * settings for the stream.
10141 			 */
10142 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10143 
10144 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10145 				configure_crc = true;
10146 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10147 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
10148 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10149 					acrtc->dm_irq_params.crc_window.update_win = true;
10150 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10151 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10152 					crc_rd_wrk->crtc = crtc;
10153 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10154 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10155 				}
10156 #endif
10157 			}
10158 
10159 			if (configure_crc)
10160 				if (amdgpu_dm_crtc_configure_crc_source(
10161 					crtc, dm_new_crtc_state, cur_crc_src))
10162 					DRM_DEBUG_DRIVER("Failed to configure crc source");
10163 #endif
10164 		}
10165 	}
10166 
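	/* If any CRTC requested an async flip, don't wait for vblank below. */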
10167 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10168 		if (new_crtc_state->async_flip)
10169 			wait_for_vblank = false;
10170 
10171 	/* Update planes when needed, per CRTC */
10172 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10173 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10174 
10175 		if (dm_new_crtc_state->stream)
10176 			amdgpu_dm_commit_planes(state, dc_state, dev,
10177 						dm, crtc, wait_for_vblank);
10178 	}
10179 
10180 	/* Update audio instances for each connector. */
10181 	amdgpu_dm_commit_audio(dev, state);
10182 
10183 	/* restore the backlight level */
10184 	for (i = 0; i < dm->num_of_edps; i++) {
10185 		if (dm->backlight_dev[i] &&
10186 		    (dm->actual_brightness[i] != dm->brightness[i]))
10187 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10188 	}
10189 
10190 	/*
10191 	 * Send a vblank event for any CRTC whose event was not handled in the
10192 	 * flip path, and mark the event consumed for drm_atomic_helper_commit_hw_done()
10193 	 */
10194 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10195 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10196 
10197 		if (new_crtc_state->event)
10198 			drm_send_event_locked(dev, &new_crtc_state->event->base);
10199 
10200 		new_crtc_state->event = NULL;
10201 	}
10202 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10203 
10204 	/* Signal HW programming completion */
10205 	drm_atomic_helper_commit_hw_done(state);
10206 
10207 	if (wait_for_vblank)
10208 		drm_atomic_helper_wait_for_flip_done(dev, state);
10209 
10210 	drm_atomic_helper_cleanup_planes(dev, state);
10211 
10212 	/* return the stolen vga memory back to VRAM */
10213 	if (!adev->mman.keep_stolen_vga_memory)
10214 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10215 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10216 
10217 	/*
10218 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10219 	 * so we can put the GPU into runtime suspend if we're not driving any
10220 	 * displays anymore
10221 	 */
10222 	for (i = 0; i < crtc_disable_count; i++)
10223 		pm_runtime_put_autosuspend(dev->dev);
10224 	pm_runtime_mark_last_busy(dev->dev);
10225 
10226 	if (dc_state_temp)
10227 		dc_release_state(dc_state_temp);
10228 }
10229 
10230 
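/*
 * Build and commit a minimal atomic state that forces a modeset on the CRTC
 * currently driving @connector, restoring the previous display configuration
 * without any userspace involvement.
 */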
10231 static int dm_force_atomic_commit(struct drm_connector *connector)
10232 {
10233 	int ret = 0;
10234 	struct drm_device *ddev = connector->dev;
10235 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10236 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10237 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10238 	struct drm_connector_state *conn_state;
10239 	struct drm_crtc_state *crtc_state;
10240 	struct drm_plane_state *plane_state;
10241 
10242 	if (!state)
10243 		return -ENOMEM;
10244 
10245 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10246 
10247 	/* Construct an atomic state to restore the previous display settings */
10248 
10249 	/*
10250 	 * Attach connectors to drm_atomic_state
10251 	 */
10252 	conn_state = drm_atomic_get_connector_state(state, connector);
10253 
10254 	ret = PTR_ERR_OR_ZERO(conn_state);
10255 	if (ret)
10256 		goto out;
10257 
10258 	/* Attach crtc to drm_atomic_state */
10259 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10260 
10261 	ret = PTR_ERR_OR_ZERO(crtc_state);
10262 	if (ret)
10263 		goto out;
10264 
10265 	/* force a restore */
10266 	crtc_state->mode_changed = true;
10267 
10268 	/* Attach plane to drm_atomic_state */
10269 	plane_state = drm_atomic_get_plane_state(state, plane);
10270 
10271 	ret = PTR_ERR_OR_ZERO(plane_state);
10272 	if (ret)
10273 		goto out;
10274 
10275 	/* Call commit internally with the state we just constructed */
10276 	ret = drm_atomic_commit(state);
10277 
10278 out:
10279 	drm_atomic_state_put(state);
10280 	if (ret)
10281 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10282 
10283 	return ret;
10284 }
10285 
10286 /*
10287  * This function handles all cases when a set mode does not come upon hotplug.
10288  * This includes when a display is unplugged then plugged back into the
10289  * same port and when running without usermode desktop manager support.
10290  */
10291 void dm_restore_drm_connector_state(struct drm_device *dev,
10292 				    struct drm_connector *connector)
10293 {
10294 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10295 	struct amdgpu_crtc *disconnected_acrtc;
10296 	struct dm_crtc_state *acrtc_state;
10297 
10298 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10299 		return;
10300 
10301 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10302 	if (!disconnected_acrtc)
10303 		return;
10304 
10305 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10306 	if (!acrtc_state->stream)
10307 		return;
10308 
10309 	/*
10310 	 * If the previous sink is not released and different from the current,
10311 	 * we deduce we are in a state where we cannot rely on a usermode call
10312 	 * to turn on the display, so we do it here.
10313 	 */
10314 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10315 		dm_force_atomic_commit(&aconnector->base);
10316 }
10317 
10318 /*
10319  * Grabs all modesetting locks to serialize against any blocking commits and
10320  * waits for completion of all non-blocking commits.
10321  */
10322 static int do_aquire_global_lock(struct drm_device *dev,
10323 				 struct drm_atomic_state *state)
10324 {
10325 	struct drm_crtc *crtc;
10326 	struct drm_crtc_commit *commit;
10327 	long ret;
10328 
10329 	/*
10330 	 * Adding all modeset locks to acquire_ctx will
10331 	 * ensure that when the framework releases it, the
10332 	 * extra locks we are taking here will get released too.
10333 	 */
10334 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10335 	if (ret)
10336 		return ret;
10337 
10338 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10339 		spin_lock(&crtc->commit_lock);
10340 		commit = list_first_entry_or_null(&crtc->commit_list,
10341 				struct drm_crtc_commit, commit_entry);
10342 		if (commit)
10343 			drm_crtc_commit_get(commit);
10344 		spin_unlock(&crtc->commit_lock);
10345 
10346 		if (!commit)
10347 			continue;
10348 
10349 		/*
10350 		 * Make sure all pending HW programming completed and
10351 		 * page flips done
10352 		 */
10353 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10354 
10355 		if (ret > 0)
10356 			ret = wait_for_completion_interruptible_timeout(
10357 					&commit->flip_done, 10*HZ);
10358 
10359 		if (ret == 0)
10360 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10361 				  "timed out\n", crtc->base.id, crtc->name);
10362 
10363 		drm_crtc_commit_put(commit);
10364 	}
10365 
10366 	return ret < 0 ? ret : 0;
10367 }
10368 
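/*
 * Derive the FreeSync (VRR) configuration for a CRTC: VRR is supported when
 * the connector reports FreeSync capability and the mode's refresh rate falls
 * within the connector's min/max range; the resulting state depends on whether
 * a fixed-rate FreeSync video mode or user-enabled VRR is requested.
 */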
10369 static void get_freesync_config_for_crtc(
10370 	struct dm_crtc_state *new_crtc_state,
10371 	struct dm_connector_state *new_con_state)
10372 {
10373 	struct mod_freesync_config config = {0};
10374 	struct amdgpu_dm_connector *aconnector =
10375 			to_amdgpu_dm_connector(new_con_state->base.connector);
10376 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10377 	int vrefresh = drm_mode_vrefresh(mode);
10378 	bool fs_vid_mode = false;
10379 
10380 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10381 					vrefresh >= aconnector->min_vfreq &&
10382 					vrefresh <= aconnector->max_vfreq;
10383 
10384 	if (new_crtc_state->vrr_supported) {
10385 		new_crtc_state->stream->ignore_msa_timing_param = true;
10386 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10387 
10388 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10389 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10390 		config.vsif_supported = true;
10391 		config.btr = true;
10392 
10393 		if (fs_vid_mode) {
10394 			config.state = VRR_STATE_ACTIVE_FIXED;
10395 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10396 			goto out;
10397 		} else if (new_crtc_state->base.vrr_enabled) {
10398 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10399 		} else {
10400 			config.state = VRR_STATE_INACTIVE;
10401 		}
10402 	}
10403 out:
10404 	new_crtc_state->freesync_config = config;
10405 }
10406 
10407 static void reset_freesync_config_for_crtc(
10408 	struct dm_crtc_state *new_crtc_state)
10409 {
10410 	new_crtc_state->vrr_supported = false;
10411 
10412 	memset(&new_crtc_state->vrr_infopacket, 0,
10413 	       sizeof(new_crtc_state->vrr_infopacket));
10414 }
10415 
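/*
 * Return true when only the vertical blanking (vtotal and vsync position)
 * differs between the two modes while every other timing parameter and the
 * vsync pulse width stay the same, i.e. a front-porch-only change as used by
 * FreeSync video modes.
 */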
10416 static bool
10417 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10418 				 struct drm_crtc_state *new_crtc_state)
10419 {
10420 	const struct drm_display_mode *old_mode, *new_mode;
10421 
10422 	if (!old_crtc_state || !new_crtc_state)
10423 		return false;
10424 
10425 	old_mode = &old_crtc_state->mode;
10426 	new_mode = &new_crtc_state->mode;
10427 
10428 	if (old_mode->clock       == new_mode->clock &&
10429 	    old_mode->hdisplay    == new_mode->hdisplay &&
10430 	    old_mode->vdisplay    == new_mode->vdisplay &&
10431 	    old_mode->htotal      == new_mode->htotal &&
10432 	    old_mode->vtotal      != new_mode->vtotal &&
10433 	    old_mode->hsync_start == new_mode->hsync_start &&
10434 	    old_mode->vsync_start != new_mode->vsync_start &&
10435 	    old_mode->hsync_end   == new_mode->hsync_end &&
10436 	    old_mode->vsync_end   != new_mode->vsync_end &&
10437 	    old_mode->hskew       == new_mode->hskew &&
10438 	    old_mode->vscan       == new_mode->vscan &&
10439 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10440 	    (new_mode->vsync_end - new_mode->vsync_start))
10441 		return true;
10442 
10443 	return false;
10444 }
10445 
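/*
 * Lock the CRTC to a fixed FreeSync refresh rate: the rate in uHz is
 * pixel clock (kHz) * 1000 * 1000000 / (htotal * vtotal).
 */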
10446 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10447 	uint64_t num, den, res;
10448 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10449 
10450 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10451 
10452 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10453 	den = (unsigned long long)new_crtc_state->mode.htotal *
10454 	      (unsigned long long)new_crtc_state->mode.vtotal;
10455 
10456 	res = div_u64(num, den);
10457 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10458 }
10459 
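/*
 * Add or remove the dc stream for @crtc in the DM atomic state depending on
 * @enable, and note via @lock_and_validation_needed when the change requires
 * full DC global validation.
 */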
10460 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10461 			 struct drm_atomic_state *state,
10462 			 struct drm_crtc *crtc,
10463 			 struct drm_crtc_state *old_crtc_state,
10464 			 struct drm_crtc_state *new_crtc_state,
10465 			 bool enable,
10466 			 bool *lock_and_validation_needed)
10467 {
10468 	struct dm_atomic_state *dm_state = NULL;
10469 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10470 	struct dc_stream_state *new_stream;
10471 	int ret = 0;
10472 
10473 	/*
10474 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10475 	 * update changed items
10476 	 */
10477 	struct amdgpu_crtc *acrtc = NULL;
10478 	struct amdgpu_dm_connector *aconnector = NULL;
10479 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10480 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10481 
10482 	new_stream = NULL;
10483 
10484 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10485 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10486 	acrtc = to_amdgpu_crtc(crtc);
10487 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10488 
10489 	/* TODO This hack should go away */
10490 	if (aconnector && enable) {
10491 		/* Make sure fake sink is created in plug-in scenario */
10492 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10493 							    &aconnector->base);
10494 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10495 							    &aconnector->base);
10496 
10497 		if (IS_ERR(drm_new_conn_state)) {
10498 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10499 			goto fail;
10500 		}
10501 
10502 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10503 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10504 
10505 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10506 			goto skip_modeset;
10507 
10508 		new_stream = create_validate_stream_for_sink(aconnector,
10509 							     &new_crtc_state->mode,
10510 							     dm_new_conn_state,
10511 							     dm_old_crtc_state->stream);
10512 
10513 		/*
10514 		 * We can have no stream on ACTION_SET if a display
10515 		 * was disconnected during S3; in this case it is not an
10516 		 * error: the OS will be updated after detection and
10517 		 * will do the right thing on the next atomic commit.
10518 		 */
10519 
10520 		if (!new_stream) {
10521 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10522 					__func__, acrtc->base.base.id);
10523 			ret = -ENOMEM;
10524 			goto fail;
10525 		}
10526 
10527 		/*
10528 		 * TODO: Check VSDB bits to decide whether this should
10529 		 * be enabled or not.
10530 		 */
10531 		new_stream->triggered_crtc_reset.enabled =
10532 			dm->force_timing_sync;
10533 
10534 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10535 
10536 		ret = fill_hdr_info_packet(drm_new_conn_state,
10537 					   &new_stream->hdr_static_metadata);
10538 		if (ret)
10539 			goto fail;
10540 
10541 		/*
10542 		 * If we already removed the old stream from the context
10543 		 * (and set the new stream to NULL) then we can't reuse
10544 		 * the old stream even if the stream and scaling are unchanged.
10545 		 * We'll hit the BUG_ON and black screen.
10546 		 *
10547 		 * TODO: Refactor this function to allow this check to work
10548 		 * in all conditions.
10549 		 */
10550 		if (dm_new_crtc_state->stream &&
10551 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10552 			goto skip_modeset;
10553 
10554 		if (dm_new_crtc_state->stream &&
10555 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10556 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10557 			new_crtc_state->mode_changed = false;
10558 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10559 					 new_crtc_state->mode_changed);
10560 		}
10561 	}
10562 
10563 	/* mode_changed flag may get updated above, need to check again */
10564 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10565 		goto skip_modeset;
10566 
10567 	drm_dbg_state(state->dev,
10568 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10569 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10570 		"connectors_changed:%d\n",
10571 		acrtc->crtc_id,
10572 		new_crtc_state->enable,
10573 		new_crtc_state->active,
10574 		new_crtc_state->planes_changed,
10575 		new_crtc_state->mode_changed,
10576 		new_crtc_state->active_changed,
10577 		new_crtc_state->connectors_changed);
10578 
10579 	/* Remove stream for any changed/disabled CRTC */
10580 	if (!enable) {
10581 
10582 		if (!dm_old_crtc_state->stream)
10583 			goto skip_modeset;
10584 
10585 		if (dm_new_crtc_state->stream &&
10586 		    is_timing_unchanged_for_freesync(new_crtc_state,
10587 						     old_crtc_state)) {
10588 			new_crtc_state->mode_changed = false;
10589 			DRM_DEBUG_DRIVER(
10590 				"Mode change not required for front porch change, "
10591 				"setting mode_changed to %d",
10592 				new_crtc_state->mode_changed);
10593 
10594 			set_freesync_fixed_config(dm_new_crtc_state);
10595 
10596 			goto skip_modeset;
10597 		} else if (aconnector &&
10598 			   is_freesync_video_mode(&new_crtc_state->mode,
10599 						  aconnector)) {
10600 			struct drm_display_mode *high_mode;
10601 
10602 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10603 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10604 				set_freesync_fixed_config(dm_new_crtc_state);
10605 			}
10606 		}
10607 
10608 		ret = dm_atomic_get_state(state, &dm_state);
10609 		if (ret)
10610 			goto fail;
10611 
10612 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10613 				crtc->base.id);
10614 
10615 		/* i.e. reset mode */
10616 		if (dc_remove_stream_from_ctx(
10617 				dm->dc,
10618 				dm_state->context,
10619 				dm_old_crtc_state->stream) != DC_OK) {
10620 			ret = -EINVAL;
10621 			goto fail;
10622 		}
10623 
10624 		dc_stream_release(dm_old_crtc_state->stream);
10625 		dm_new_crtc_state->stream = NULL;
10626 
10627 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10628 
10629 		*lock_and_validation_needed = true;
10630 
10631 	} else {/* Add stream for any updated/enabled CRTC */
10632 		/*
10633 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
10634 		 * newly added MST connectors are not found in the existing crtc_state
10635 		 * in chained mode. TODO: dig out the root cause of this.
10636 		 */
10637 		if (!aconnector)
10638 			goto skip_modeset;
10639 
10640 		if (modereset_required(new_crtc_state))
10641 			goto skip_modeset;
10642 
10643 		if (modeset_required(new_crtc_state, new_stream,
10644 				     dm_old_crtc_state->stream)) {
10645 
10646 			WARN_ON(dm_new_crtc_state->stream);
10647 
10648 			ret = dm_atomic_get_state(state, &dm_state);
10649 			if (ret)
10650 				goto fail;
10651 
10652 			dm_new_crtc_state->stream = new_stream;
10653 
10654 			dc_stream_retain(new_stream);
10655 
10656 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10657 					 crtc->base.id);
10658 
10659 			if (dc_add_stream_to_ctx(
10660 					dm->dc,
10661 					dm_state->context,
10662 					dm_new_crtc_state->stream) != DC_OK) {
10663 				ret = -EINVAL;
10664 				goto fail;
10665 			}
10666 
10667 			*lock_and_validation_needed = true;
10668 		}
10669 	}
10670 
10671 skip_modeset:
10672 	/* Release extra reference */
10673 	if (new_stream)
10674 		 dc_stream_release(new_stream);
10675 
10676 	/*
10677 	 * We want to do dc stream updates that do not require a
10678 	 * full modeset below.
10679 	 */
10680 	if (!(enable && aconnector && new_crtc_state->active))
10681 		return 0;
10682 	/*
10683 	 * Given the above conditions, the dc stream state cannot be NULL because:
10684 	 * 1. The CRTC is in the process of being enabled (its stream has just
10685 	 *    been added to the dc context, or is already on it),
10686 	 * 2. It has a valid connector attached, and
10687 	 * 3. It is currently active and enabled.
10688 	 * => The dc stream state currently exists.
10689 	 */
10690 	BUG_ON(dm_new_crtc_state->stream == NULL);
10691 
10692 	/* Scaling or underscan settings */
10693 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10694 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10695 		update_stream_scaling_settings(
10696 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10697 
10698 	/* ABM settings */
10699 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10700 
10701 	/*
10702 	 * Color management settings. We also update color properties
10703 	 * when a modeset is needed, to ensure it gets reprogrammed.
10704 	 */
10705 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10706 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10707 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10708 		if (ret)
10709 			goto fail;
10710 	}
10711 
10712 	/* Update Freesync settings. */
10713 	get_freesync_config_for_crtc(dm_new_crtc_state,
10714 				     dm_new_conn_state);
10715 
10716 	return ret;
10717 
10718 fail:
10719 	if (new_stream)
10720 		dc_stream_release(new_stream);
10721 	return ret;
10722 }
10723 
10724 static bool should_reset_plane(struct drm_atomic_state *state,
10725 			       struct drm_plane *plane,
10726 			       struct drm_plane_state *old_plane_state,
10727 			       struct drm_plane_state *new_plane_state)
10728 {
10729 	struct drm_plane *other;
10730 	struct drm_plane_state *old_other_state, *new_other_state;
10731 	struct drm_crtc_state *new_crtc_state;
10732 	int i;
10733 
10734 	/*
10735 	 * TODO: Remove this hack once the checks below are sufficient
10736 	 * TODO: Remove this hack once the checks below are sufficient
10737 	 * to determine when we need to reset all the planes on
10738 	 */
10739 	if (state->allow_modeset)
10740 		return true;
10741 
10742 	/* Exit early if we know that we're adding or removing the plane. */
10743 	if (old_plane_state->crtc != new_plane_state->crtc)
10744 		return true;
10745 
10746 	/* old crtc == new_crtc == NULL, plane not in context. */
10747 	if (!new_plane_state->crtc)
10748 		return false;
10749 
10750 	new_crtc_state =
10751 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10752 
10753 	if (!new_crtc_state)
10754 		return true;
10755 
10756 	/* CRTC Degamma changes currently require us to recreate planes. */
10757 	if (new_crtc_state->color_mgmt_changed)
10758 		return true;
10759 
10760 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10761 		return true;
10762 
10763 	/*
10764 	 * If there are any new primary or overlay planes being added or
10765 	 * removed then the z-order can potentially change. To ensure
10766 	 * correct z-order and pipe acquisition the current DC architecture
10767 	 * requires us to remove and recreate all existing planes.
10768 	 *
10769 	 * TODO: Come up with a more elegant solution for this.
10770 	 */
10771 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10772 		struct amdgpu_framebuffer *old_afb, *new_afb;
10773 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10774 			continue;
10775 
10776 		if (old_other_state->crtc != new_plane_state->crtc &&
10777 		    new_other_state->crtc != new_plane_state->crtc)
10778 			continue;
10779 
10780 		if (old_other_state->crtc != new_other_state->crtc)
10781 			return true;
10782 
10783 		/* Src/dst size and scaling updates. */
10784 		if (old_other_state->src_w != new_other_state->src_w ||
10785 		    old_other_state->src_h != new_other_state->src_h ||
10786 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10787 		    old_other_state->crtc_h != new_other_state->crtc_h)
10788 			return true;
10789 
10790 		/* Rotation / mirroring updates. */
10791 		if (old_other_state->rotation != new_other_state->rotation)
10792 			return true;
10793 
10794 		/* Blending updates. */
10795 		if (old_other_state->pixel_blend_mode !=
10796 		    new_other_state->pixel_blend_mode)
10797 			return true;
10798 
10799 		/* Alpha updates. */
10800 		if (old_other_state->alpha != new_other_state->alpha)
10801 			return true;
10802 
10803 		/* Colorspace changes. */
10804 		if (old_other_state->color_range != new_other_state->color_range ||
10805 		    old_other_state->color_encoding != new_other_state->color_encoding)
10806 			return true;
10807 
10808 		/* Framebuffer checks fall at the end. */
10809 		if (!old_other_state->fb || !new_other_state->fb)
10810 			continue;
10811 
10812 		/* Pixel format changes can require bandwidth updates. */
10813 		if (old_other_state->fb->format != new_other_state->fb->format)
10814 			return true;
10815 
10816 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10817 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10818 
10819 		/* Tiling and DCC changes also require bandwidth updates. */
10820 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10821 		    old_afb->base.modifier != new_afb->base.modifier)
10822 			return true;
10823 	}
10824 
10825 	return false;
10826 }
10827 
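/*
 * Validate a framebuffer for use by the hardware cursor: the size must be
 * within the CRTC's cursor limits, no cropping is allowed, the pitch must
 * equal the width and be one of the supported values, and the layout must be
 * linear (untiled) when no modifier is supplied.
 */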
10828 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10829 			      struct drm_plane_state *new_plane_state,
10830 			      struct drm_framebuffer *fb)
10831 {
10832 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10833 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10834 	unsigned int pitch;
10835 	bool linear;
10836 
10837 	if (fb->width > new_acrtc->max_cursor_width ||
10838 	    fb->height > new_acrtc->max_cursor_height) {
10839 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10840 				 new_plane_state->fb->width,
10841 				 new_plane_state->fb->height);
10842 		return -EINVAL;
10843 	}
10844 	if (new_plane_state->src_w != fb->width << 16 ||
10845 	    new_plane_state->src_h != fb->height << 16) {
10846 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10847 		return -EINVAL;
10848 	}
10849 
10850 	/* Pitch in pixels */
10851 	pitch = fb->pitches[0] / fb->format->cpp[0];
10852 
10853 	if (fb->width != pitch) {
10854 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10855 				 fb->width, pitch);
10856 		return -EINVAL;
10857 	}
10858 
10859 	switch (pitch) {
10860 	case 64:
10861 	case 128:
10862 	case 256:
10863 		/* FB pitch is supported by cursor plane */
10864 		break;
10865 	default:
10866 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10867 		return -EINVAL;
10868 	}
10869 
10870 	/* Core DRM takes care of checking FB modifiers, so we only need to
10871 	 * check tiling flags when the FB doesn't have a modifier. */
10872 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10873 		if (adev->family < AMDGPU_FAMILY_AI) {
10874 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10875 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10876 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10877 		} else {
10878 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10879 		}
10880 		if (!linear) {
10881 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10882 			return -EINVAL;
10883 		}
10884 	}
10885 
10886 	return 0;
10887 }
10888 
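/*
 * Counterpart of dm_update_crtc_state() for planes: add or remove the dc
 * plane state for @plane in the stream's dc context depending on @enable,
 * with extra restrictions for the cursor plane.
 */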
10889 static int dm_update_plane_state(struct dc *dc,
10890 				 struct drm_atomic_state *state,
10891 				 struct drm_plane *plane,
10892 				 struct drm_plane_state *old_plane_state,
10893 				 struct drm_plane_state *new_plane_state,
10894 				 bool enable,
10895 				 bool *lock_and_validation_needed)
10896 {
10897 
10898 	struct dm_atomic_state *dm_state = NULL;
10899 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10900 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10901 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10902 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10903 	struct amdgpu_crtc *new_acrtc;
10904 	bool needs_reset;
10905 	int ret = 0;
10906 
10907 
10908 	new_plane_crtc = new_plane_state->crtc;
10909 	old_plane_crtc = old_plane_state->crtc;
10910 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10911 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10912 
10913 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10914 		if (!enable || !new_plane_crtc ||
10915 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10916 			return 0;
10917 
10918 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10919 
10920 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10921 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10922 			return -EINVAL;
10923 		}
10924 
10925 		if (new_plane_state->fb) {
10926 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10927 						 new_plane_state->fb);
10928 			if (ret)
10929 				return ret;
10930 		}
10931 
10932 		return 0;
10933 	}
10934 
10935 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10936 					 new_plane_state);
10937 
10938 	/* Remove any changed/removed planes */
10939 	if (!enable) {
10940 		if (!needs_reset)
10941 			return 0;
10942 
10943 		if (!old_plane_crtc)
10944 			return 0;
10945 
10946 		old_crtc_state = drm_atomic_get_old_crtc_state(
10947 				state, old_plane_crtc);
10948 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10949 
10950 		if (!dm_old_crtc_state->stream)
10951 			return 0;
10952 
10953 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10954 				plane->base.id, old_plane_crtc->base.id);
10955 
10956 		ret = dm_atomic_get_state(state, &dm_state);
10957 		if (ret)
10958 			return ret;
10959 
10960 		if (!dc_remove_plane_from_context(
10961 				dc,
10962 				dm_old_crtc_state->stream,
10963 				dm_old_plane_state->dc_state,
10964 				dm_state->context)) {
10965 
10966 			return -EINVAL;
10967 		}
10968 
10969 
10970 		dc_plane_state_release(dm_old_plane_state->dc_state);
10971 		dm_new_plane_state->dc_state = NULL;
10972 
10973 		*lock_and_validation_needed = true;
10974 
10975 	} else { /* Add new planes */
10976 		struct dc_plane_state *dc_new_plane_state;
10977 
10978 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10979 			return 0;
10980 
10981 		if (!new_plane_crtc)
10982 			return 0;
10983 
10984 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10985 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10986 
10987 		if (!dm_new_crtc_state->stream)
10988 			return 0;
10989 
10990 		if (!needs_reset)
10991 			return 0;
10992 
10993 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10994 		if (ret)
10995 			return ret;
10996 
10997 		WARN_ON(dm_new_plane_state->dc_state);
10998 
10999 		dc_new_plane_state = dc_create_plane_state(dc);
11000 		if (!dc_new_plane_state)
11001 			return -ENOMEM;
11002 
11003 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11004 				 plane->base.id, new_plane_crtc->base.id);
11005 
11006 		ret = fill_dc_plane_attributes(
11007 			drm_to_adev(new_plane_crtc->dev),
11008 			dc_new_plane_state,
11009 			new_plane_state,
11010 			new_crtc_state);
11011 		if (ret) {
11012 			dc_plane_state_release(dc_new_plane_state);
11013 			return ret;
11014 		}
11015 
11016 		ret = dm_atomic_get_state(state, &dm_state);
11017 		if (ret) {
11018 			dc_plane_state_release(dc_new_plane_state);
11019 			return ret;
11020 		}
11021 
11022 		/*
11023 		 * Any atomic check errors that occur after this will
11024 		 * not need a release. The plane state will be attached
11025 		 * to the stream, and therefore part of the atomic
11026 		 * state. It'll be released when the atomic state is
11027 		 * cleaned.
11028 		 */
11029 		if (!dc_add_plane_to_context(
11030 				dc,
11031 				dm_new_crtc_state->stream,
11032 				dc_new_plane_state,
11033 				dm_state->context)) {
11034 
11035 			dc_plane_state_release(dc_new_plane_state);
11036 			return -EINVAL;
11037 		}
11038 
11039 		dm_new_plane_state->dc_state = dc_new_plane_state;
11040 
11041 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11042 
11043 		/* Tell DC to do a full surface update every time there
11044 		 * is a plane change. Inefficient, but works for now.
11045 		 */
11046 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11047 
11048 		*lock_and_validation_needed = true;
11049 	}
11050 
11051 
11052 	return ret;
11053 }
11054 
11055 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11056 				       int *src_w, int *src_h)
11057 {
11058 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11059 	case DRM_MODE_ROTATE_90:
11060 	case DRM_MODE_ROTATE_270:
11061 		*src_w = plane_state->src_h >> 16;
11062 		*src_h = plane_state->src_w >> 16;
11063 		break;
11064 	case DRM_MODE_ROTATE_0:
11065 	case DRM_MODE_ROTATE_180:
11066 	default:
11067 		*src_w = plane_state->src_w >> 16;
11068 		*src_h = plane_state->src_h >> 16;
11069 		break;
11070 	}
11071 }
11072 
11073 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11074 				struct drm_crtc *crtc,
11075 				struct drm_crtc_state *new_crtc_state)
11076 {
11077 	struct drm_plane *cursor = crtc->cursor, *underlying;
11078 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
11079 	int i;
11080 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11081 	int cursor_src_w, cursor_src_h;
11082 	int underlying_src_w, underlying_src_h;
11083 
11084 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
11085 	 * cursor per pipe, but it is going to inherit the scaling and
11086 	 * positioning from the underlying pipe. Check that the cursor plane's
11087 	 * scaling matches the underlying planes'. */
11088 
11089 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11090 	if (!new_cursor_state || !new_cursor_state->fb) {
11091 		return 0;
11092 	}
11093 
11094 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11095 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11096 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
11097 
11098 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11099 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
11100 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11101 			continue;
11102 
11103 		/* Ignore disabled planes */
11104 		if (!new_underlying_state->fb)
11105 			continue;
11106 
11107 		dm_get_oriented_plane_size(new_underlying_state,
11108 					   &underlying_src_w, &underlying_src_h);
11109 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11110 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11111 
11112 		if (cursor_scale_w != underlying_scale_w ||
11113 		    cursor_scale_h != underlying_scale_h) {
11114 			drm_dbg_atomic(crtc->dev,
11115 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11116 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11117 			return -EINVAL;
11118 		}
11119 
11120 		/* If this plane covers the whole CRTC, no need to check planes underneath */
11121 		if (new_underlying_state->crtc_x <= 0 &&
11122 		    new_underlying_state->crtc_y <= 0 &&
11123 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11124 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11125 			break;
11126 	}
11127 
11128 	return 0;
11129 }
11130 
11131 #if defined(CONFIG_DRM_AMD_DC_DCN)
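/*
 * Find the MST connector currently driving @crtc and add every other CRTC
 * that shares its MST topology to the atomic state, since changing the DSC
 * configuration on one stream can affect the others on the same link.
 */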
11132 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11133 {
11134 	struct drm_connector *connector;
11135 	struct drm_connector_state *conn_state, *old_conn_state;
11136 	struct amdgpu_dm_connector *aconnector = NULL;
11137 	int i;
11138 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11139 		if (!conn_state->crtc)
11140 			conn_state = old_conn_state;
11141 
11142 		if (conn_state->crtc != crtc)
11143 			continue;
11144 
11145 		aconnector = to_amdgpu_dm_connector(connector);
11146 		if (!aconnector->port || !aconnector->mst_port)
11147 			aconnector = NULL;
11148 		else
11149 			break;
11150 	}
11151 
11152 	if (!aconnector)
11153 		return 0;
11154 
11155 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11156 }
11157 #endif
11158 
11159 /**
11160  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11161  * @dev: The DRM device
11162  * @state: The atomic state to commit
11163  *
11164  * Validate that the given atomic state is programmable by DC into hardware.
11165  * This involves constructing a &struct dc_state reflecting the new hardware
11166  * state we wish to commit, then querying DC to see if it is programmable. It's
11167  * important not to modify the existing DC state. Otherwise, atomic_check
11168  * may unexpectedly commit hardware changes.
11169  *
11170  * When validating the DC state, it's important that the right locks are
11171  * acquired. For full updates case which removes/adds/updates streams on one
11172  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
11173  * that any such full update commit will wait for completion of any outstanding
11174  * flip using DRMs synchronization events.
11175  *
11176  * Note that DM adds the affected connectors for all CRTCs in state, when that
11177  * might not seem necessary. This is because DC stream creation requires the
11178  * DC sink, which is tied to the DRM connector state. Cleaning this up should
11179  * be possible but non-trivial - a possible TODO item.
11180  *
11181  * Return: 0 on success, or a negative error code if validation failed.
11182  */
11183 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11184 				  struct drm_atomic_state *state)
11185 {
11186 	struct amdgpu_device *adev = drm_to_adev(dev);
11187 	struct dm_atomic_state *dm_state = NULL;
11188 	struct dc *dc = adev->dm.dc;
11189 	struct drm_connector *connector;
11190 	struct drm_connector_state *old_con_state, *new_con_state;
11191 	struct drm_crtc *crtc;
11192 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11193 	struct drm_plane *plane;
11194 	struct drm_plane_state *old_plane_state, *new_plane_state;
11195 	enum dc_status status;
11196 	int ret, i;
11197 	bool lock_and_validation_needed = false;
11198 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11199 #if defined(CONFIG_DRM_AMD_DC_DCN)
11200 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
11201 	struct drm_dp_mst_topology_state *mst_state;
11202 	struct drm_dp_mst_topology_mgr *mgr;
11203 #endif
11204 
11205 	trace_amdgpu_dm_atomic_check_begin(state);
11206 
11207 	ret = drm_atomic_helper_check_modeset(dev, state);
11208 	if (ret) {
11209 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11210 		goto fail;
11211 	}
11212 
11213 	/* Check connector changes */
11214 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11215 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11216 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11217 
11218 		/* Skip connectors that are disabled or part of modeset already. */
11219 		if (!old_con_state->crtc && !new_con_state->crtc)
11220 			continue;
11221 
11222 		if (!new_con_state->crtc)
11223 			continue;
11224 
11225 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11226 		if (IS_ERR(new_crtc_state)) {
11227 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11228 			ret = PTR_ERR(new_crtc_state);
11229 			goto fail;
11230 		}
11231 
11232 		if (dm_old_con_state->abm_level !=
11233 		    dm_new_con_state->abm_level)
11234 			new_crtc_state->connectors_changed = true;
11235 	}
11236 
11237 #if defined(CONFIG_DRM_AMD_DC_DCN)
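	/*
	 * On DSC-capable ASICs, pull the other CRTCs sharing an MST/DSC link
	 * into the state for every CRTC that needs a modeset, then pre-validate
	 * the DSC configuration.
	 */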
11238 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11239 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11240 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11241 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11242 				if (ret) {
11243 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11244 					goto fail;
11245 				}
11246 			}
11247 		}
11248 		if (!pre_validate_dsc(state, &dm_state, vars)) {
11249 			ret = -EINVAL;
11250 			goto fail;
11251 		}
11252 	}
11253 #endif
11254 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11255 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11256 
11257 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11258 		    !new_crtc_state->color_mgmt_changed &&
11259 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11260 			dm_old_crtc_state->dsc_force_changed == false)
11261 			continue;
11262 
11263 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11264 		if (ret) {
11265 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11266 			goto fail;
11267 		}
11268 
11269 		if (!new_crtc_state->enable)
11270 			continue;
11271 
11272 		ret = drm_atomic_add_affected_connectors(state, crtc);
11273 		if (ret) {
11274 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11275 			goto fail;
11276 		}
11277 
11278 		ret = drm_atomic_add_affected_planes(state, crtc);
11279 		if (ret) {
11280 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11281 			goto fail;
11282 		}
11283 
11284 		if (dm_old_crtc_state->dsc_force_changed)
11285 			new_crtc_state->mode_changed = true;
11286 	}
11287 
11288 	/*
11289 	 * Add all primary and overlay planes on the CRTC to the state
11290 	 * whenever a plane is enabled to maintain correct z-ordering
11291 	 * and to enable fast surface updates.
11292 	 */
11293 	drm_for_each_crtc(crtc, dev) {
11294 		bool modified = false;
11295 
11296 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11297 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11298 				continue;
11299 
11300 			if (new_plane_state->crtc == crtc ||
11301 			    old_plane_state->crtc == crtc) {
11302 				modified = true;
11303 				break;
11304 			}
11305 		}
11306 
11307 		if (!modified)
11308 			continue;
11309 
11310 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11311 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11312 				continue;
11313 
11314 			new_plane_state =
11315 				drm_atomic_get_plane_state(state, plane);
11316 
11317 			if (IS_ERR(new_plane_state)) {
11318 				ret = PTR_ERR(new_plane_state);
11319 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11320 				goto fail;
11321 			}
11322 		}
11323 	}
11324 
11325 	/* Remove existing planes if they are modified */
11326 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11327 		ret = dm_update_plane_state(dc, state, plane,
11328 					    old_plane_state,
11329 					    new_plane_state,
11330 					    false,
11331 					    &lock_and_validation_needed);
11332 		if (ret) {
11333 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11334 			goto fail;
11335 		}
11336 	}
11337 
11338 	/* Disable all CRTCs that need to be disabled */
11339 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11340 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11341 					   old_crtc_state,
11342 					   new_crtc_state,
11343 					   false,
11344 					   &lock_and_validation_needed);
11345 		if (ret) {
11346 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11347 			goto fail;
11348 		}
11349 	}
11350 
11351 	/* Enable all CRTCs that need to be enabled */
11352 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11353 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11354 					   old_crtc_state,
11355 					   new_crtc_state,
11356 					   true,
11357 					   &lock_and_validation_needed);
11358 		if (ret) {
11359 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11360 			goto fail;
11361 		}
11362 	}
11363 
11364 	/* Add new/modified planes */
11365 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11366 		ret = dm_update_plane_state(dc, state, plane,
11367 					    old_plane_state,
11368 					    new_plane_state,
11369 					    true,
11370 					    &lock_and_validation_needed);
11371 		if (ret) {
11372 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11373 			goto fail;
11374 		}
11375 	}
11376 
11377 	/* Run this here since we want to validate the streams we created */
11378 	ret = drm_atomic_helper_check_planes(dev, state);
11379 	if (ret) {
11380 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11381 		goto fail;
11382 	}
11383 
11384 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11385 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11386 		if (dm_new_crtc_state->mpo_requested)
11387 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11388 	}
11389 
11390 	/* Check cursor planes scaling */
11391 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11392 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11393 		if (ret) {
11394 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11395 			goto fail;
11396 		}
11397 	}
11398 
11399 	if (state->legacy_cursor_update) {
11400 		/*
11401 		 * This is a fast cursor update coming from the plane update
11402 		 * helper, check if it can be done asynchronously for better
11403 		 * performance.
11404 		 */
11405 		state->async_update =
11406 			!drm_atomic_helper_async_check(dev, state);
11407 
11408 		/*
11409 		 * Skip the remaining global validation if this is an async
11410 		 * update. Cursor updates can be done without affecting
11411 		 * state or bandwidth calcs and this avoids the performance
11412 		 * penalty of locking the private state object and
11413 		 * allocating a new dc_state.
11414 		 */
11415 		if (state->async_update)
11416 			return 0;
11417 	}
11418 
11419 	/* Check scaling and underscan changes */
11420 	/* TODO: Removed scaling changes validation due to inability to commit
11421 	 * new stream into context w/o causing full reset. Need to
11422 	 * decide how to handle.
11423 	 */
11424 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11425 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11426 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11427 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11428 
11429 		/* Skip any modesets/resets */
11430 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11431 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11432 			continue;
11433 
11434 		/* Skip anything that is not a scaling or underscan change */
11435 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11436 			continue;
11437 
11438 		lock_and_validation_needed = true;
11439 	}
11440 
11441 #if defined(CONFIG_DRM_AMD_DC_DCN)
11442 	/* set the slot info for each mst_state based on the link encoding format */
11443 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11444 		struct amdgpu_dm_connector *aconnector;
11445 		struct drm_connector *connector;
11446 		struct drm_connector_list_iter iter;
11447 		u8 link_coding_cap;
11448 
11449 		if (!mgr->mst_state)
11450 			continue;
11451 
11452 		drm_connector_list_iter_begin(dev, &iter);
11453 		drm_for_each_connector_iter(connector, &iter) {
11454 			int id = connector->index;
11455 
11456 			if (id == mst_state->mgr->conn_base_id) {
11457 				aconnector = to_amdgpu_dm_connector(connector);
11458 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11459 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11460 
11461 				break;
11462 			}
11463 		}
11464 		drm_connector_list_iter_end(&iter);
11465 
11466 	}
11467 #endif
11468 	/**
11469 	 * Streams and planes are reset when there are changes that affect
11470 	 * bandwidth. Anything that affects bandwidth needs to go through
11471 	 * DC global validation to ensure that the configuration can be applied
11472 	 * to hardware.
11473 	 *
11474 	 * We have to currently stall out here in atomic_check for outstanding
11475 	 * commits to finish in this case because our IRQ handlers reference
11476 	 * DRM state directly - we can end up disabling interrupts too early
11477 	 * if we don't.
11478 	 *
11479 	 * TODO: Remove this stall and drop DM state private objects.
11480 	 */
11481 	if (lock_and_validation_needed) {
11482 		ret = dm_atomic_get_state(state, &dm_state);
11483 		if (ret) {
11484 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11485 			goto fail;
11486 		}
11487 
11488 		ret = do_aquire_global_lock(dev, state);
11489 		if (ret) {
11490 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11491 			goto fail;
11492 		}
11493 
11494 #if defined(CONFIG_DRM_AMD_DC_DCN)
11495 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11496 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11497 			ret = -EINVAL;
11498 			goto fail;
11499 		}
11500 
11501 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11502 		if (ret) {
11503 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11504 			goto fail;
11505 		}
11506 #endif
11507 
11508 		/*
11509 		 * Perform validation of MST topology in the state:
11510 		 * We need to perform MST atomic check before calling
11511 		 * dc_validate_global_state(); otherwise we risk getting stuck
11512 		 * in an infinite loop and eventually hanging.
11513 		 */
11514 		ret = drm_dp_mst_atomic_check(state);
11515 		if (ret) {
11516 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11517 			goto fail;
11518 		}
11519 		status = dc_validate_global_state(dc, dm_state->context, true);
11520 		if (status != DC_OK) {
11521 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11522 				       dc_status_to_str(status), status);
11523 			ret = -EINVAL;
11524 			goto fail;
11525 		}
11526 	} else {
11527 		/*
11528 		 * the DC context or affect global validation, and their commit
11529 		 * work can be done in parallel with other commits not touching
11530 		 * commit work done in parallel with other commits not touching
11531 		 * the same resource. If we have a new DC context as part of
11532 		 * the DM atomic state from validation we need to free it and
11533 		 * retain the existing one instead.
11534 		 *
11535 		 * Furthermore, since the DM atomic state only contains the DC
11536 		 * context and can safely be annulled, we can free the state
11537 		 * and clear the associated private object now to free
11538 		 * some memory and avoid a possible use-after-free later.
11539 		 */
11540 
11541 		for (i = 0; i < state->num_private_objs; i++) {
11542 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11543 
11544 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11545 				int j = state->num_private_objs - 1;
11546 
11547 				dm_atomic_destroy_state(obj,
11548 						state->private_objs[i].state);
11549 
11550 				/* If i is not at the end of the array then the
11551 				 * last element needs to be moved to where i was
11552 				 * before the array can safely be truncated.
11553 				 */
11554 				if (i != j)
11555 					state->private_objs[i] =
11556 						state->private_objs[j];
11557 
11558 				state->private_objs[j].ptr = NULL;
11559 				state->private_objs[j].state = NULL;
11560 				state->private_objs[j].old_state = NULL;
11561 				state->private_objs[j].new_state = NULL;
11562 
11563 				state->num_private_objs = j;
11564 				break;
11565 			}
11566 		}
11567 	}
11568 
11569 	/* Store the overall update type for use later in atomic check. */
11570 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11571 		struct dm_crtc_state *dm_new_crtc_state =
11572 			to_dm_crtc_state(new_crtc_state);
11573 
11574 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11575 							 UPDATE_TYPE_FULL :
11576 							 UPDATE_TYPE_FAST;
11577 	}
11578 
11579 	/* Must have succeeded by this point */
11580 	WARN_ON(ret);
11581 
11582 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11583 
11584 	return ret;
11585 
11586 fail:
11587 	if (ret == -EDEADLK)
11588 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11589 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11590 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11591 	else
11592 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11593 
11594 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11595 
11596 	return ret;
11597 }
11598 
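/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether
 * the DP_MSA_TIMING_PAR_IGNORED bit is set, i.e. whether the sink can
 * render without MSA timing parameters. Used below when deciding whether
 * a DP/eDP sink's EDID needs to be checked for FreeSync monitor ranges.
 */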
11599 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11600 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11601 {
11602 	uint8_t dpcd_data;
11603 	bool capable = false;
11604 
11605 	if (amdgpu_dm_connector->dc_link &&
11606 		dm_helpers_dp_read_dpcd(
11607 				NULL,
11608 				amdgpu_dm_connector->dc_link,
11609 				DP_DOWN_STREAM_PORT_COUNT,
11610 				&dpcd_data,
11611 				sizeof(dpcd_data))) {
11612 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11613 	}
11614 
11615 	return capable;
11616 }
11617 
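/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to DMUB via the DMUB_CMD__EDID_CEA command and decode
 * the reply: either an ack for the chunk or, once parsing completes, the
 * AMD VSDB fields (FreeSync support, VSDB version, min/max frame rates)
 * copied into *vsdb.
 */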
11618 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11619 		unsigned int offset,
11620 		unsigned int total_length,
11621 		uint8_t *data,
11622 		unsigned int length,
11623 		struct amdgpu_hdmi_vsdb_info *vsdb)
11624 {
11625 	bool res;
11626 	union dmub_rb_cmd cmd;
11627 	struct dmub_cmd_send_edid_cea *input;
11628 	struct dmub_cmd_edid_cea_output *output;
11629 
11630 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11631 		return false;
11632 
11633 	memset(&cmd, 0, sizeof(cmd));
11634 
11635 	input = &cmd.edid_cea.data.input;
11636 
11637 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11638 	cmd.edid_cea.header.sub_type = 0;
11639 	cmd.edid_cea.header.payload_bytes =
11640 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11641 	input->offset = offset;
11642 	input->length = length;
11643 	input->cea_total_length = total_length;
11644 	memcpy(input->payload, data, length);
11645 
11646 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11647 	if (!res) {
11648 		DRM_ERROR("EDID CEA parser failed\n");
11649 		return false;
11650 	}
11651 
11652 	output = &cmd.edid_cea.data.output;
11653 
11654 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11655 		if (!output->ack.success) {
11656 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11657 					output->ack.offset);
11658 		}
11659 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11660 		if (!output->amd_vsdb.vsdb_found)
11661 			return false;
11662 
11663 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11664 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11665 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11666 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11667 	} else {
11668 		DRM_WARN("Unknown EDID CEA parser results\n");
11669 		return false;
11670 	}
11671 
11672 	return true;
11673 }
11674 
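/*
 * Stream a CEA extension block to the DMCU firmware parser 8 bytes at a
 * time. After the final chunk, query the parser for an AMD VSDB; if one
 * was found, fill in vsdb_info with the reported version and min/max
 * refresh rates.
 */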
11675 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11676 		uint8_t *edid_ext, int len,
11677 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11678 {
11679 	int i;
11680 
11681 	/* send extension block to DMCU for parsing */
11682 	for (i = 0; i < len; i += 8) {
11683 		bool res;
11684 		int offset;
11685 
11686 		/* send 8 bytes at a time */
11687 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11688 			return false;
11689 
11690 		if (i + 8 == len) {
11691 			/* whole EDID block has been sent, expect the result */
11692 			int version, min_rate, max_rate;
11693 
11694 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11695 			if (res) {
11696 				/* amd vsdb found */
11697 				vsdb_info->freesync_supported = 1;
11698 				vsdb_info->amd_vsdb_version = version;
11699 				vsdb_info->min_refresh_rate_hz = min_rate;
11700 				vsdb_info->max_refresh_rate_hz = max_rate;
11701 				return true;
11702 			}
11703 			/* not amd vsdb */
11704 			return false;
11705 		}
11706 
11707 		/* check for ack */
11708 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11709 		if (!res)
11710 			return false;
11711 	}
11712 
11713 	return false;
11714 }
11715 
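/*
 * Same as parse_edid_cea_dmcu(), but for the DMUB path: vsdb_info is
 * filled in by dm_edid_parser_send_cea() as the chunks are acknowledged.
 */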
11716 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11717 		uint8_t *edid_ext, int len,
11718 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11719 {
11720 	int i;
11721 
11722 	/* send extension block to DMUB for parsing */
11723 	for (i = 0; i < len; i += 8) {
11724 		/* send 8 bytes at a time */
11725 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11726 			return false;
11727 	}
11728 
11729 	return vsdb_info->freesync_supported;
11730 }
11731 
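/* Pick the DMUB-based CEA parser when a DMUB service is present, else DMCU. */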
11732 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11733 		uint8_t *edid_ext, int len,
11734 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11735 {
11736 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11737 
11738 	if (adev->dm.dmub_srv)
11739 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11740 	else
11741 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11742 }
11743 
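/*
 * Locate the CEA extension block in the EDID (mirroring what
 * drm_find_cea_extension() does) and hand it to the firmware parser.
 * Returns the extension block index when an AMD VSDB with FreeSync info
 * was found, or -ENODEV otherwise.
 */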
11744 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11745 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11746 {
11747 	uint8_t *edid_ext = NULL;
11748 	int i;
11749 	bool valid_vsdb_found = false;
11750 
11751 	/*----- drm_find_cea_extension() -----*/
11752 	/* No EDID or EDID extensions */
11753 	if (edid == NULL || edid->extensions == 0)
11754 		return -ENODEV;
11755 
11756 	/* Find CEA extension */
11757 	for (i = 0; i < edid->extensions; i++) {
11758 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11759 		if (edid_ext[0] == CEA_EXT)
11760 			break;
11761 	}
11762 
11763 	if (i == edid->extensions)
11764 		return -ENODEV;
11765 
11766 	/*----- cea_db_offsets() -----*/
11767 	if (edid_ext[0] != CEA_EXT)
11768 		return -ENODEV;
11769 
11770 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11771 
11772 	return valid_vsdb_found ? i : -ENODEV;
11773 }
11774 
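/*
 * Update the connector's FreeSync/VRR capability from its EDID: for DP and
 * eDP sinks, parse the detailed timing monitor-range descriptors; for HDMI
 * sinks, look for the AMD VSDB in the CEA extension block. The result is
 * cached in the dm connector state and exposed through the connector's
 * vrr_capable property.
 */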
11775 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11776 					struct edid *edid)
11777 {
11778 	int i = 0;
11779 	struct detailed_timing *timing;
11780 	struct detailed_non_pixel *data;
11781 	struct detailed_data_monitor_range *range;
11782 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11783 			to_amdgpu_dm_connector(connector);
11784 	struct dm_connector_state *dm_con_state = NULL;
11785 	struct dc_sink *sink;
11786 
11787 	struct drm_device *dev = connector->dev;
11788 	struct amdgpu_device *adev = drm_to_adev(dev);
11789 	bool freesync_capable = false;
11790 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11791 
11792 	if (!connector->state) {
11793 		DRM_ERROR("%s - Connector has no state\n", __func__);
11794 		goto update;
11795 	}
11796 
11797 	sink = amdgpu_dm_connector->dc_sink ?
11798 		amdgpu_dm_connector->dc_sink :
11799 		amdgpu_dm_connector->dc_em_sink;
11800 
11801 	if (!edid || !sink) {
11802 		dm_con_state = to_dm_connector_state(connector->state);
11803 
11804 		amdgpu_dm_connector->min_vfreq = 0;
11805 		amdgpu_dm_connector->max_vfreq = 0;
11806 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11807 		connector->display_info.monitor_range.min_vfreq = 0;
11808 		connector->display_info.monitor_range.max_vfreq = 0;
11809 		freesync_capable = false;
11810 
11811 		goto update;
11812 	}
11813 
11814 	dm_con_state = to_dm_connector_state(connector->state);
11815 
11816 	if (!adev->dm.freesync_module)
11817 		goto update;
11818 
11819 
11820 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11821 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11822 		bool edid_check_required = false;
11823 
11824 		if (edid) {
11825 			edid_check_required = is_dp_capable_without_timing_msa(
11826 						adev->dm.dc,
11827 						amdgpu_dm_connector);
11828 		}
11829 
11830 		if (edid_check_required && (edid->version > 1 ||
11831 		   (edid->version == 1 && edid->revision > 1))) {
11832 			for (i = 0; i < 4; i++) {
11833 
11834 				timing	= &edid->detailed_timings[i];
11835 				data	= &timing->data.other_data;
11836 				range	= &data->data.range;
11837 				/*
11838 				 * Check if monitor has continuous frequency mode
11839 				 */
11840 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11841 					continue;
11842 				/*
11843 				 * Check for flag range limits only. If flag == 1 then
11844 				 * no additional timing information provided.
11845 				 * Default GTF, GTF Secondary curve and CVT are not
11846 				 * supported
11847 				 */
11848 				if (range->flags != 1)
11849 					continue;
11850 
11851 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11852 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11853 				amdgpu_dm_connector->pixel_clock_mhz =
11854 					range->pixel_clock_mhz * 10;
11855 
11856 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11857 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11858 
11859 				break;
11860 			}
11861 
11862 			if (amdgpu_dm_connector->max_vfreq -
11863 			    amdgpu_dm_connector->min_vfreq > 10) {
11864 
11865 				freesync_capable = true;
11866 			}
11867 		}
11868 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11869 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11870 		if (i >= 0 && vsdb_info.freesync_supported) {
11871 			timing  = &edid->detailed_timings[i];
11872 			data    = &timing->data.other_data;
11873 
11874 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11875 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11876 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11877 				freesync_capable = true;
11878 
11879 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11880 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11881 		}
11882 	}
11883 
11884 update:
11885 	if (dm_con_state)
11886 		dm_con_state->freesync_capable = freesync_capable;
11887 
11888 	if (connector->vrr_capable_property)
11889 		drm_connector_set_vrr_capable_property(connector,
11890 						       freesync_capable);
11891 }
11892 
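/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and re-trigger CRTC synchronization, all under the DC lock.
 */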
11893 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11894 {
11895 	struct amdgpu_device *adev = drm_to_adev(dev);
11896 	struct dc *dc = adev->dm.dc;
11897 	int i;
11898 
11899 	mutex_lock(&adev->dm.dc_lock);
11900 	if (dc->current_state) {
11901 		for (i = 0; i < dc->current_state->stream_count; ++i)
11902 			dc->current_state->streams[i]
11903 				->triggered_crtc_reset.enabled =
11904 				adev->dm.force_timing_sync;
11905 
11906 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11907 		dc_trigger_sync(dc, dc->current_state);
11908 	}
11909 	mutex_unlock(&adev->dm.dc_lock);
11910 }
11911 
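/*
 * Low-level register access helpers: writes and reads go through the CGS
 * device and are recorded with the amdgpu_dc_wreg/rreg trace events.
 * dm_read_reg_func() additionally refuses to read while a DMUB register
 * offload gather is in progress (unless burst writes are expected).
 */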
11912 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11913 		       uint32_t value, const char *func_name)
11914 {
11915 #ifdef DM_CHECK_ADDR_0
11916 	if (address == 0) {
11917 		DC_ERR("invalid register write. address = 0\n");
11918 		return;
11919 	}
11920 #endif
11921 	cgs_write_register(ctx->cgs_device, address, value);
11922 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11923 }
11924 
11925 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11926 			  const char *func_name)
11927 {
11928 	uint32_t value;
11929 #ifdef DM_CHECK_ADDR_0
11930 	if (address == 0) {
11931 		DC_ERR("invalid register read; address = 0\n");
11932 		return 0;
11933 	}
11934 #endif
11935 
11936 	if (ctx->dmub_srv &&
11937 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11938 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11939 		ASSERT(false);
11940 		return 0;
11941 	}
11942 
11943 	value = cgs_read_register(ctx->cgs_device, address);
11944 
11945 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11946 
11947 	return value;
11948 }
11949 
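/*
 * Translate the async-to-sync completion status of a DMUB AUX or
 * SET_CONFIG request into a return value (the AUX reply length or 0 on
 * success, -1 otherwise) and an operation_result code for the caller.
 */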
11950 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11951 						struct dc_context *ctx,
11952 						uint8_t status_type,
11953 						uint32_t *operation_result)
11954 {
11955 	struct amdgpu_device *adev = ctx->driver_context;
11956 	int return_status = -1;
11957 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11958 
11959 	if (is_cmd_aux) {
11960 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11961 			return_status = p_notify->aux_reply.length;
11962 			*operation_result = p_notify->result;
11963 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11964 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11965 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11966 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11967 		} else {
11968 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11969 		}
11970 	} else {
11971 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11972 			return_status = 0;
11973 			*operation_result = p_notify->sc_status;
11974 		} else {
11975 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11976 		}
11977 	}
11978 
11979 	return return_status;
11980 }
11981 
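/*
 * Synchronous wrapper around the DMUB AUX / SET_CONFIG paths: dispatch the
 * request to DMUB and, when needed, wait (up to 10 seconds) on the
 * dmub_aux_transfer_done completion before translating the result for the
 * caller. For a successful AUX read, the reply data from the DMUB
 * notification is copied back into the caller's payload.
 */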
11982 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11983 	unsigned int link_index, void *cmd_payload, void *operation_result)
11984 {
11985 	struct amdgpu_device *adev = ctx->driver_context;
11986 	int ret = 0;
11987 
11988 	if (is_cmd_aux) {
11989 		dc_process_dmub_aux_transfer_async(ctx->dc,
11990 			link_index, (struct aux_payload *)cmd_payload);
11991 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11992 					(struct set_config_cmd_payload *)cmd_payload,
11993 					adev->dm.dmub_notify)) {
11994 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11995 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11996 					(uint32_t *)operation_result);
11997 	}
11998 
11999 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
12000 	if (ret == 0) {
12001 		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
12002 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12003 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
12004 				(uint32_t *)operation_result);
12005 	}
12006 
12007 	if (is_cmd_aux) {
12008 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
12009 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
12010 
12011 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
12012 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
12013 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
12014 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
12015 				       adev->dm.dmub_notify->aux_reply.length);
12016 			}
12017 		}
12018 	}
12019 
12020 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12021 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12022 			(uint32_t *)operation_result);
12023 }
12024 
12025 /*
12026  * Check whether seamless boot is supported.
12027  *
12028  * So far we only support seamless boot on CHIP_VANGOGH.
12029  * If everything goes well, we may consider expanding
12030  * seamless boot to other ASICs.
12031  */
12032 bool check_seamless_boot_capability(struct amdgpu_device *adev)
12033 {
12034 	switch (adev->asic_type) {
12035 	case CHIP_VANGOGH:
12036 		if (!adev->mman.keep_stolen_vga_memory)
12037 			return true;
12038 		break;
12039 	default:
12040 		break;
12041 	}
12042 
12043 	return false;
12044 }
12045