xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 409c188c57cdb5cb1dfcac79e72b5169f0463fe4)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 #include <linux/dmi.h>
76 
77 #include <drm/display/drm_dp_mst_helper.h>
78 #include <drm/display/drm_hdmi_helper.h>
79 #include <drm/drm_atomic.h>
80 #include <drm/drm_atomic_uapi.h>
81 #include <drm/drm_atomic_helper.h>
82 #include <drm/drm_fb_helper.h>
83 #include <drm/drm_fourcc.h>
84 #include <drm/drm_edid.h>
85 #include <drm/drm_vblank.h>
86 #include <drm/drm_audio_component.h>
87 
88 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
89 
90 #include "dcn/dcn_1_0_offset.h"
91 #include "dcn/dcn_1_0_sh_mask.h"
92 #include "soc15_hw_ip.h"
93 #include "vega10_ip_offset.h"
94 
95 #include "soc15_common.h"
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
119 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
121 
122 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
123 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
124 
125 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
126 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
127 
128 /* Number of bytes in PSP header for firmware. */
129 #define PSP_HEADER_BYTES 0x100
130 
131 /* Number of bytes in PSP footer for firmware. */
132 #define PSP_FOOTER_BYTES 0x100
133 
134 /**
135  * DOC: overview
136  *
137  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
138  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
139  * requests into DC requests, and DC responses into DRM responses.
140  *
141  * The root control structure is &struct amdgpu_display_manager.
142  */
143 
144 /* basic init/fini API */
145 static int amdgpu_dm_init(struct amdgpu_device *adev);
146 static void amdgpu_dm_fini(struct amdgpu_device *adev);
147 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
148 
149 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
150 {
151 	switch (link->dpcd_caps.dongle_type) {
152 	case DISPLAY_DONGLE_NONE:
153 		return DRM_MODE_SUBCONNECTOR_Native;
154 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
155 		return DRM_MODE_SUBCONNECTOR_VGA;
156 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
157 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
158 		return DRM_MODE_SUBCONNECTOR_DVID;
159 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
160 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
161 		return DRM_MODE_SUBCONNECTOR_HDMIA;
162 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
163 	default:
164 		return DRM_MODE_SUBCONNECTOR_Unknown;
165 	}
166 }
167 
168 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
169 {
170 	struct dc_link *link = aconnector->dc_link;
171 	struct drm_connector *connector = &aconnector->base;
172 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
173 
174 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
175 		return;
176 
177 	if (aconnector->dc_sink)
178 		subconnector = get_subconnector_type(link);
179 
180 	drm_object_property_set_value(&connector->base,
181 			connector->dev->mode_config.dp_subconnector_property,
182 			subconnector);
183 }
184 
185 /*
186  * Initializes drm_device display related structures, based on the information
187  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
188  * drm_encoder, drm_mode_config
189  *
190  * Returns 0 on success
191  */
192 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
193 /* removes and deallocates the drm structures, created by the above function */
194 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
195 
196 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
197 				struct drm_plane *plane,
198 				unsigned long possible_crtcs,
199 				const struct dc_plane_cap *plane_cap);
200 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
201 			       struct drm_plane *plane,
202 			       uint32_t link_index);
203 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
204 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
205 				    uint32_t link_index,
206 				    struct amdgpu_encoder *amdgpu_encoder);
207 static int amdgpu_dm_encoder_init(struct drm_device *dev,
208 				  struct amdgpu_encoder *aencoder,
209 				  uint32_t link_index);
210 
211 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
212 
213 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
214 
215 static int amdgpu_dm_atomic_check(struct drm_device *dev,
216 				  struct drm_atomic_state *state);
217 
218 static void handle_cursor_update(struct drm_plane *plane,
219 				 struct drm_plane_state *old_plane_state);
220 
221 static const struct drm_format_info *
222 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
223 
224 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
225 static void handle_hpd_rx_irq(void *param);
226 
227 static bool
228 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
229 				 struct drm_crtc_state *new_crtc_state);
230 /*
231  * dm_vblank_get_counter
232  *
233  * @brief
234  * Get counter for number of vertical blanks
235  *
236  * @param
237  * struct amdgpu_device *adev - [in] desired amdgpu device
238  * int crtc - [in] which CRTC to get the counter from
239  *
240  * @return
241  * Counter for vertical blanks
242  */
243 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
244 {
245 	if (crtc >= adev->mode_info.num_crtc)
246 		return 0;
247 	else {
248 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
249 
250 		if (acrtc->dm_irq_params.stream == NULL) {
251 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
252 				  crtc);
253 			return 0;
254 		}
255 
256 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
257 	}
258 }
259 
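/*
 * dm_crtc_get_scanoutpos - report the current scanout position and the
 * vblank interval of a CRTC, packed back into the register-style format
 * the base driver still expects.
 */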
260 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
261 				  u32 *vbl, u32 *position)
262 {
263 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
264 
265 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
266 		return -EINVAL;
267 	else {
268 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
269 
270 		if (acrtc->dm_irq_params.stream == NULL) {
271 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
272 				  crtc);
273 			return 0;
274 		}
275 
276 		/*
277 		 * TODO rework base driver to use values directly.
278 		 * for now parse it back into reg-format
279 		 */
280 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
281 					 &v_blank_start,
282 					 &v_blank_end,
283 					 &h_position,
284 					 &v_position);
285 
286 		*position = v_position | (h_position << 16);
287 		*vbl = v_blank_start | (v_blank_end << 16);
288 	}
289 
290 	return 0;
291 }
292 
293 static bool dm_is_idle(void *handle)
294 {
295 	/* XXX todo */
296 	return true;
297 }
298 
299 static int dm_wait_for_idle(void *handle)
300 {
301 	/* XXX todo */
302 	return 0;
303 }
304 
305 static bool dm_check_soft_reset(void *handle)
306 {
307 	return false;
308 }
309 
310 static int dm_soft_reset(void *handle)
311 {
312 	/* XXX todo */
313 	return 0;
314 }
315 
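/* Look up the amdgpu_crtc whose OTG instance matches otg_inst. */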
316 static struct amdgpu_crtc *
317 get_crtc_by_otg_inst(struct amdgpu_device *adev,
318 		     int otg_inst)
319 {
320 	struct drm_device *dev = adev_to_drm(adev);
321 	struct drm_crtc *crtc;
322 	struct amdgpu_crtc *amdgpu_crtc;
323 
324 	if (WARN_ON(otg_inst == -1))
325 		return adev->mode_info.crtcs[0];
326 
327 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
328 		amdgpu_crtc = to_amdgpu_crtc(crtc);
329 
330 		if (amdgpu_crtc->otg_inst == otg_inst)
331 			return amdgpu_crtc;
332 	}
333 
334 	return NULL;
335 }
336 
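/*
 * VRR helpers: report whether variable refresh rate is currently active,
 * either from the IRQ parameters or from a dm_crtc_state.
 */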
337 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
338 {
339 	return acrtc->dm_irq_params.freesync_config.state ==
340 		       VRR_STATE_ACTIVE_VARIABLE ||
341 	       acrtc->dm_irq_params.freesync_config.state ==
342 		       VRR_STATE_ACTIVE_FIXED;
343 }
344 
345 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
346 {
347 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
348 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
349 }
350 
351 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
352 					      struct dm_crtc_state *new_state)
353 {
354 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
355 		return true;
356 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
357 		return true;
358 	else
359 		return false;
360 }
361 
362 /**
363  * dm_pflip_high_irq() - Handle pageflip interrupt
364  * @interrupt_params: used for determining the CRTC instance
365  *
366  * Handles the pageflip interrupt by notifying all interested parties
367  * that the pageflip has been completed.
368  */
369 static void dm_pflip_high_irq(void *interrupt_params)
370 {
371 	struct amdgpu_crtc *amdgpu_crtc;
372 	struct common_irq_params *irq_params = interrupt_params;
373 	struct amdgpu_device *adev = irq_params->adev;
374 	unsigned long flags;
375 	struct drm_pending_vblank_event *e;
376 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
377 	bool vrr_active;
378 
379 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
380 
381 	/* IRQ could occur when in initial stage */
382 	/* TODO work and BO cleanup */
383 	if (amdgpu_crtc == NULL) {
384 		DC_LOG_PFLIP("CRTC is null, returning.\n");
385 		return;
386 	}
387 
388 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
389 
390 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
391 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
392 						 amdgpu_crtc->pflip_status,
393 						 AMDGPU_FLIP_SUBMITTED,
394 						 amdgpu_crtc->crtc_id,
395 						 amdgpu_crtc);
396 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
397 		return;
398 	}
399 
400 	/* page flip completed. */
401 	e = amdgpu_crtc->event;
402 	amdgpu_crtc->event = NULL;
403 
404 	WARN_ON(!e);
405 
406 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
407 
408 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
409 	if (!vrr_active ||
410 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
411 				      &v_blank_end, &hpos, &vpos) ||
412 	    (vpos < v_blank_start)) {
413 		/* Update to correct count and vblank timestamp if racing with
414 		 * vblank irq. This also updates to the correct vblank timestamp
415 		 * even in VRR mode, as scanout is past the front-porch atm.
416 		 */
417 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
418 
419 		/* Wake up userspace by sending the pageflip event with proper
420 		 * count and timestamp of vblank of flip completion.
421 		 */
422 		if (e) {
423 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
424 
425 			/* Event sent, so done with vblank for this flip */
426 			drm_crtc_vblank_put(&amdgpu_crtc->base);
427 		}
428 	} else if (e) {
429 		/* VRR active and inside front-porch: vblank count and
430 		 * timestamp for pageflip event will only be up to date after
431 		 * drm_crtc_handle_vblank() has been executed from late vblank
432 		 * irq handler after start of back-porch (vline 0). We queue the
433 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
434 		 * updated timestamp and count, once it runs after us.
435 		 *
436 		 * We need to open-code this instead of using the helper
437 		 * drm_crtc_arm_vblank_event(), as that helper would
438 		 * call drm_crtc_accurate_vblank_count(), which we must
439 		 * not call in VRR mode while we are in front-porch!
440 		 */
441 
442 		/* sequence will be replaced by real count during send-out. */
443 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
444 		e->pipe = amdgpu_crtc->crtc_id;
445 
446 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
447 		e = NULL;
448 	}
449 
450 	/* Keep track of vblank of this flip for flip throttling. We use the
451 	 * cooked hw counter, as that one is incremented at start of this vblank
452 	 * of pageflip completion, so last_flip_vblank is the forbidden count
453 	 * for queueing new pageflips if vsync + VRR is enabled.
454 	 */
455 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
456 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
457 
458 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
459 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
460 
461 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
462 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
463 		     vrr_active, (int) !e);
464 }
465 
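/*
 * Forward the vblank to DRM core and, for cursor-only commits that never
 * raise a pageflip interrupt, send the pending completion event here.
 */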
466 static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
467 {
468 	struct drm_crtc *crtc = &acrtc->base;
469 	struct drm_device *dev = crtc->dev;
470 	unsigned long flags;
471 
472 	drm_crtc_handle_vblank(crtc);
473 
474 	spin_lock_irqsave(&dev->event_lock, flags);
475 
476 	/* Send completion event for cursor-only commits */
477 	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
478 		drm_crtc_send_vblank_event(crtc, acrtc->event);
479 		drm_crtc_vblank_put(crtc);
480 		acrtc->event = NULL;
481 	}
482 
483 	spin_unlock_irqrestore(&dev->event_lock, flags);
484 }
485 
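/*
 * VUPDATE interrupt handler: tracks the measured refresh rate and, in VRR
 * mode, performs the deferred vblank handling plus BTR processing for
 * pre-DCE12 (pre-AI family) ASICs.
 */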
486 static void dm_vupdate_high_irq(void *interrupt_params)
487 {
488 	struct common_irq_params *irq_params = interrupt_params;
489 	struct amdgpu_device *adev = irq_params->adev;
490 	struct amdgpu_crtc *acrtc;
491 	struct drm_device *drm_dev;
492 	struct drm_vblank_crtc *vblank;
493 	ktime_t frame_duration_ns, previous_timestamp;
494 	unsigned long flags;
495 	int vrr_active;
496 
497 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
498 
499 	if (acrtc) {
500 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
501 		drm_dev = acrtc->base.dev;
502 		vblank = &drm_dev->vblank[acrtc->base.index];
503 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
504 		frame_duration_ns = vblank->time - previous_timestamp;
505 
506 		if (frame_duration_ns > 0) {
507 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
508 						frame_duration_ns,
509 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
510 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
511 		}
512 
513 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
514 			      acrtc->crtc_id,
515 			      vrr_active);
516 
517 		/* Core vblank handling is done here after end of front-porch in
518 		 * vrr mode, as vblank timestamping will only give valid results
519 		 * now that it is done after the front-porch. This will also deliver
520 		 * page-flip completion events that have been queued to us
521 		 * if a pageflip happened inside front-porch.
522 		 */
523 		if (vrr_active) {
524 			dm_crtc_handle_vblank(acrtc);
525 
526 			/* BTR processing for pre-DCE12 ASICs */
527 			if (acrtc->dm_irq_params.stream &&
528 			    adev->family < AMDGPU_FAMILY_AI) {
529 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
530 				mod_freesync_handle_v_update(
531 				    adev->dm.freesync_module,
532 				    acrtc->dm_irq_params.stream,
533 				    &acrtc->dm_irq_params.vrr_params);
534 
535 				dc_stream_adjust_vmin_vmax(
536 				    adev->dm.dc,
537 				    acrtc->dm_irq_params.stream,
538 				    &acrtc->dm_irq_params.vrr_params.adjust);
539 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
540 			}
541 		}
542 	}
543 }
544 
545 /**
546  * dm_crtc_high_irq() - Handles CRTC interrupt
547  * @interrupt_params: used for determining the CRTC instance
548  *
549  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
550  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
551  */
552 static void dm_crtc_high_irq(void *interrupt_params)
553 {
554 	struct common_irq_params *irq_params = interrupt_params;
555 	struct amdgpu_device *adev = irq_params->adev;
556 	struct amdgpu_crtc *acrtc;
557 	unsigned long flags;
558 	int vrr_active;
559 
560 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
561 	if (!acrtc)
562 		return;
563 
564 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
565 
566 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
567 		      vrr_active, acrtc->dm_irq_params.active_planes);
568 
569 	/**
570 	 * Core vblank handling at start of front-porch is only possible
571 	 * in non-vrr mode, as only in that mode does vblank timestamping give
572 	 * valid results while done in the front-porch. Otherwise defer it
573 	 * to dm_vupdate_high_irq after end of front-porch.
574 	 */
575 	if (!vrr_active)
576 		dm_crtc_handle_vblank(acrtc);
577 
578 	/**
579 	 * Following stuff must happen at start of vblank, for crc
580 	 * computation and below-the-range btr support in vrr mode.
581 	 */
582 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
583 
584 	/* BTR updates need to happen before VUPDATE on Vega and above. */
585 	if (adev->family < AMDGPU_FAMILY_AI)
586 		return;
587 
588 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
589 
590 	if (acrtc->dm_irq_params.stream &&
591 	    acrtc->dm_irq_params.vrr_params.supported &&
592 	    acrtc->dm_irq_params.freesync_config.state ==
593 		    VRR_STATE_ACTIVE_VARIABLE) {
594 		mod_freesync_handle_v_update(adev->dm.freesync_module,
595 					     acrtc->dm_irq_params.stream,
596 					     &acrtc->dm_irq_params.vrr_params);
597 
598 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
599 					   &acrtc->dm_irq_params.vrr_params.adjust);
600 	}
601 
602 	/*
603 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
604 	 * In that case, pageflip completion interrupts won't fire and pageflip
605 	 * completion events won't get delivered. Prevent this by sending
606 	 * pending pageflip events from here if a flip is still pending.
607 	 *
608 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
609 	 * avoid race conditions between flip programming and completion,
610 	 * which could cause too early flip completion events.
611 	 */
612 	if (adev->family >= AMDGPU_FAMILY_RV &&
613 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
614 	    acrtc->dm_irq_params.active_planes == 0) {
615 		if (acrtc->event) {
616 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
617 			acrtc->event = NULL;
618 			drm_crtc_vblank_put(&acrtc->base);
619 		}
620 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
621 	}
622 
623 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
624 }
625 
626 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
627 /**
628  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
629  * DCN generation ASICs
630  * @interrupt_params: interrupt parameters
631  *
632  * Used to set crc window/read out crc value at vertical line 0 position
633  */
634 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
635 {
636 	struct common_irq_params *irq_params = interrupt_params;
637 	struct amdgpu_device *adev = irq_params->adev;
638 	struct amdgpu_crtc *acrtc;
639 
640 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
641 
642 	if (!acrtc)
643 		return;
644 
645 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
646 }
647 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
648 
649 /**
650  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
651  * @adev: amdgpu_device pointer
652  * @notify: dmub notification structure
653  *
654  * Dmub AUX or SET_CONFIG command completion processing callback
655  * Copies dmub notification to DM which is to be read by the AUX command
656  * issuing thread, and also signals the event to wake up that thread.
657  */
658 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
659 					struct dmub_notification *notify)
660 {
661 	if (adev->dm.dmub_notify)
662 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
663 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
664 		complete(&adev->dm.dmub_aux_transfer_done);
665 }
666 
667 /**
668  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
669  * @adev: amdgpu_device pointer
670  * @notify: dmub notification structure
671  *
672  * Dmub Hpd interrupt processing callback. Gets the display index through
673  * the link index and calls a helper to do the processing.
674  */
675 static void dmub_hpd_callback(struct amdgpu_device *adev,
676 			      struct dmub_notification *notify)
677 {
678 	struct amdgpu_dm_connector *aconnector;
679 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
680 	struct drm_connector *connector;
681 	struct drm_connector_list_iter iter;
682 	struct dc_link *link;
683 	uint8_t link_index = 0;
684 	struct drm_device *dev;
685 
686 	if (adev == NULL)
687 		return;
688 
689 	if (notify == NULL) {
690 		DRM_ERROR("DMUB HPD callback notification was NULL");
691 		return;
692 	}
693 
694 	if (notify->link_index > adev->dm.dc->link_count) {
695 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
696 		return;
697 	}
698 
699 	link_index = notify->link_index;
700 	link = adev->dm.dc->links[link_index];
701 	dev = adev->dm.ddev;
702 
703 	drm_connector_list_iter_begin(dev, &iter);
704 	drm_for_each_connector_iter(connector, &iter) {
705 		aconnector = to_amdgpu_dm_connector(connector);
706 		if (link && aconnector->dc_link == link) {
707 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
708 			hpd_aconnector = aconnector;
709 			break;
710 		}
711 	}
712 	drm_connector_list_iter_end(&iter);
713 
714 	if (hpd_aconnector) {
715 		if (notify->type == DMUB_NOTIFICATION_HPD)
716 			handle_hpd_irq_helper(hpd_aconnector);
717 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
718 			handle_hpd_rx_irq(hpd_aconnector);
719 	}
720 }
721 
722 /**
723  * register_dmub_notify_callback - Sets callback for DMUB notify
724  * @adev: amdgpu_device pointer
725  * @type: Type of dmub notification
726  * @callback: Dmub interrupt callback function
727  * @dmub_int_thread_offload: offload indicator
728  *
729  * API to register a dmub callback handler for a dmub notification
730  * Also sets the indicator whether callback processing is to be offloaded
731  * to the dmub interrupt handling thread.
732  * Return: true if successfully registered, false if there is existing registration
733  */
734 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
735 					  enum dmub_notification_type type,
736 					  dmub_notify_interrupt_callback_t callback,
737 					  bool dmub_int_thread_offload)
738 {
739 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
740 		adev->dm.dmub_callback[type] = callback;
741 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
742 	} else
743 		return false;
744 
745 	return true;
746 }
747 
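/*
 * Work item that runs the registered DMUB notification callback outside of
 * interrupt context.
 */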
748 static void dm_handle_hpd_work(struct work_struct *work)
749 {
750 	struct dmub_hpd_work *dmub_hpd_wrk;
751 
752 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
753 
754 	if (!dmub_hpd_wrk->dmub_notify) {
755 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
756 		return;
757 	}
758 
759 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
760 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
761 		dmub_hpd_wrk->dmub_notify);
762 	}
763 
764 	kfree(dmub_hpd_wrk->dmub_notify);
765 	kfree(dmub_hpd_wrk);
766 
767 }
768 
769 #define DMUB_TRACE_MAX_READ 64
770 /**
771  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
772  * @interrupt_params: used for determining the Outbox instance
773  *
774  * Handles the Outbox interrupt by dispatching any pending DMUB
775  * notifications and draining the DMUB trace log buffer.
776  */
777 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
778 {
779 	struct dmub_notification notify;
780 	struct common_irq_params *irq_params = interrupt_params;
781 	struct amdgpu_device *adev = irq_params->adev;
782 	struct amdgpu_display_manager *dm = &adev->dm;
783 	struct dmcub_trace_buf_entry entry = { 0 };
784 	uint32_t count = 0;
785 	struct dmub_hpd_work *dmub_hpd_wrk;
786 	struct dc_link *plink = NULL;
787 
788 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
789 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
790 
791 		do {
792 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
793 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
794 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
795 				continue;
796 			}
797 			if (!dm->dmub_callback[notify.type]) {
798 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
799 				continue;
800 			}
801 			if (dm->dmub_thread_offload[notify.type] == true) {
802 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
803 				if (!dmub_hpd_wrk) {
804 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
805 					return;
806 				}
807 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
808 				if (!dmub_hpd_wrk->dmub_notify) {
809 					kfree(dmub_hpd_wrk);
810 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
811 					return;
812 				}
813 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
814 				if (dmub_hpd_wrk->dmub_notify)
815 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
816 				dmub_hpd_wrk->adev = adev;
817 				if (notify.type == DMUB_NOTIFICATION_HPD) {
818 					plink = adev->dm.dc->links[notify.link_index];
819 					if (plink) {
820 						plink->hpd_status =
821 							notify.hpd_status == DP_HPD_PLUG;
822 					}
823 				}
824 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
825 			} else {
826 				dm->dmub_callback[notify.type](adev, &notify);
827 			}
828 		} while (notify.pending_notification);
829 	}
830 
831 
832 	do {
833 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
834 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
835 							entry.param0, entry.param1);
836 
837 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
838 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
839 		} else
840 			break;
841 
842 		count++;
843 
844 	} while (count <= DMUB_TRACE_MAX_READ);
845 
846 	if (count > DMUB_TRACE_MAX_READ)
847 		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
848 }
849 
850 static int dm_set_clockgating_state(void *handle,
851 		  enum amd_clockgating_state state)
852 {
853 	return 0;
854 }
855 
856 static int dm_set_powergating_state(void *handle,
857 		  enum amd_powergating_state state)
858 {
859 	return 0;
860 }
861 
862 /* Prototypes of private functions */
863 static int dm_early_init(void *handle);
864 
865 /* Allocate memory for FBC compressed data */
866 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
867 {
868 	struct drm_device *dev = connector->dev;
869 	struct amdgpu_device *adev = drm_to_adev(dev);
870 	struct dm_compressor_info *compressor = &adev->dm.compressor;
871 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
872 	struct drm_display_mode *mode;
873 	unsigned long max_size = 0;
874 
875 	if (adev->dm.dc->fbc_compressor == NULL)
876 		return;
877 
878 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
879 		return;
880 
881 	if (compressor->bo_ptr)
882 		return;
883 
884 
885 	list_for_each_entry(mode, &connector->modes, head) {
886 		if (max_size < mode->htotal * mode->vtotal)
887 			max_size = mode->htotal * mode->vtotal;
888 	}
889 
890 	if (max_size) {
891 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
892 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
893 			    &compressor->gpu_addr, &compressor->cpu_addr);
894 
895 		if (r)
896 			DRM_ERROR("DM: Failed to initialize FBC\n");
897 		else {
898 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
899 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
900 		}
901 
902 	}
903 
904 }
905 
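/*
 * Audio component callback: copy the ELD of the connector mapped to the
 * requested audio pin (port) into buf for the audio driver.
 */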
906 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
907 					  int pipe, bool *enabled,
908 					  unsigned char *buf, int max_bytes)
909 {
910 	struct drm_device *dev = dev_get_drvdata(kdev);
911 	struct amdgpu_device *adev = drm_to_adev(dev);
912 	struct drm_connector *connector;
913 	struct drm_connector_list_iter conn_iter;
914 	struct amdgpu_dm_connector *aconnector;
915 	int ret = 0;
916 
917 	*enabled = false;
918 
919 	mutex_lock(&adev->dm.audio_lock);
920 
921 	drm_connector_list_iter_begin(dev, &conn_iter);
922 	drm_for_each_connector_iter(connector, &conn_iter) {
923 		aconnector = to_amdgpu_dm_connector(connector);
924 		if (aconnector->audio_inst != port)
925 			continue;
926 
927 		*enabled = true;
928 		ret = drm_eld_size(connector->eld);
929 		memcpy(buf, connector->eld, min(max_bytes, ret));
930 
931 		break;
932 	}
933 	drm_connector_list_iter_end(&conn_iter);
934 
935 	mutex_unlock(&adev->dm.audio_lock);
936 
937 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
938 
939 	return ret;
940 }
941 
942 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
943 	.get_eld = amdgpu_dm_audio_component_get_eld,
944 };
945 
946 static int amdgpu_dm_audio_component_bind(struct device *kdev,
947 				       struct device *hda_kdev, void *data)
948 {
949 	struct drm_device *dev = dev_get_drvdata(kdev);
950 	struct amdgpu_device *adev = drm_to_adev(dev);
951 	struct drm_audio_component *acomp = data;
952 
953 	acomp->ops = &amdgpu_dm_audio_component_ops;
954 	acomp->dev = kdev;
955 	adev->dm.audio_component = acomp;
956 
957 	return 0;
958 }
959 
960 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
961 					  struct device *hda_kdev, void *data)
962 {
963 	struct drm_device *dev = dev_get_drvdata(kdev);
964 	struct amdgpu_device *adev = drm_to_adev(dev);
965 	struct drm_audio_component *acomp = data;
966 
967 	acomp->ops = NULL;
968 	acomp->dev = NULL;
969 	adev->dm.audio_component = NULL;
970 }
971 
972 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
973 	.bind	= amdgpu_dm_audio_component_bind,
974 	.unbind	= amdgpu_dm_audio_component_unbind,
975 };
976 
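/* Describe the available audio pins and register the audio component. */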
977 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
978 {
979 	int i, ret;
980 
981 	if (!amdgpu_audio)
982 		return 0;
983 
984 	adev->mode_info.audio.enabled = true;
985 
986 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
987 
988 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
989 		adev->mode_info.audio.pin[i].channels = -1;
990 		adev->mode_info.audio.pin[i].rate = -1;
991 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
992 		adev->mode_info.audio.pin[i].status_bits = 0;
993 		adev->mode_info.audio.pin[i].category_code = 0;
994 		adev->mode_info.audio.pin[i].connected = false;
995 		adev->mode_info.audio.pin[i].id =
996 			adev->dm.dc->res_pool->audios[i]->inst;
997 		adev->mode_info.audio.pin[i].offset = 0;
998 	}
999 
1000 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1001 	if (ret < 0)
1002 		return ret;
1003 
1004 	adev->dm.audio_registered = true;
1005 
1006 	return 0;
1007 }
1008 
1009 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
1010 {
1011 	if (!amdgpu_audio)
1012 		return;
1013 
1014 	if (!adev->mode_info.audio.enabled)
1015 		return;
1016 
1017 	if (adev->dm.audio_registered) {
1018 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1019 		adev->dm.audio_registered = false;
1020 	}
1021 
1022 	/* TODO: Disable audio? */
1023 
1024 	adev->mode_info.audio.enabled = false;
1025 }
1026 
1027 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1028 {
1029 	struct drm_audio_component *acomp = adev->dm.audio_component;
1030 
1031 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1032 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1033 
1034 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1035 						 pin, -1);
1036 	}
1037 }
1038 
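/*
 * Copy the DMUB firmware regions into framebuffer memory, program the
 * hardware parameters and bring up the DMUB service. Returns 0 immediately
 * when DMUB is not supported on the ASIC.
 */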
1039 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1040 {
1041 	const struct dmcub_firmware_header_v1_0 *hdr;
1042 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1043 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1044 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1045 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1046 	struct abm *abm = adev->dm.dc->res_pool->abm;
1047 	struct dmub_srv_hw_params hw_params;
1048 	enum dmub_status status;
1049 	const unsigned char *fw_inst_const, *fw_bss_data;
1050 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1051 	bool has_hw_support;
1052 
1053 	if (!dmub_srv)
1054 		/* DMUB isn't supported on the ASIC. */
1055 		return 0;
1056 
1057 	if (!fb_info) {
1058 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1059 		return -EINVAL;
1060 	}
1061 
1062 	if (!dmub_fw) {
1063 		/* Firmware required for DMUB support. */
1064 		DRM_ERROR("No firmware provided for DMUB.\n");
1065 		return -EINVAL;
1066 	}
1067 
1068 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1069 	if (status != DMUB_STATUS_OK) {
1070 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1071 		return -EINVAL;
1072 	}
1073 
1074 	if (!has_hw_support) {
1075 		DRM_INFO("DMUB unsupported on ASIC\n");
1076 		return 0;
1077 	}
1078 
1079 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1080 	status = dmub_srv_hw_reset(dmub_srv);
1081 	if (status != DMUB_STATUS_OK)
1082 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1083 
1084 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1085 
1086 	fw_inst_const = dmub_fw->data +
1087 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1088 			PSP_HEADER_BYTES;
1089 
1090 	fw_bss_data = dmub_fw->data +
1091 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1092 		      le32_to_cpu(hdr->inst_const_bytes);
1093 
1094 	/* Copy firmware and bios info into FB memory. */
1095 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1096 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1097 
1098 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1099 
1100 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1101 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1102 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1103 	 * will be done by dm_dmub_hw_init
1104 	 */
1105 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1106 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1107 				fw_inst_const_size);
1108 	}
1109 
1110 	if (fw_bss_data_size)
1111 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1112 		       fw_bss_data, fw_bss_data_size);
1113 
1114 	/* Copy firmware bios info into FB memory. */
1115 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1116 	       adev->bios_size);
1117 
1118 	/* Reset regions that need to be reset. */
1119 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1120 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1121 
1122 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1123 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1124 
1125 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1126 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1127 
1128 	/* Initialize hardware. */
1129 	memset(&hw_params, 0, sizeof(hw_params));
1130 	hw_params.fb_base = adev->gmc.fb_start;
1131 	hw_params.fb_offset = adev->gmc.aper_base;
1132 
1133 	/* backdoor load firmware and trigger dmub running */
1134 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1135 		hw_params.load_inst_const = true;
1136 
1137 	if (dmcu)
1138 		hw_params.psp_version = dmcu->psp_version;
1139 
1140 	for (i = 0; i < fb_info->num_fb; ++i)
1141 		hw_params.fb[i] = &fb_info->fb[i];
1142 
1143 	switch (adev->ip_versions[DCE_HWIP][0]) {
1144 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1145 		hw_params.dpia_supported = true;
1146 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1147 		break;
1148 	default:
1149 		break;
1150 	}
1151 
1152 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1153 	if (status != DMUB_STATUS_OK) {
1154 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1155 		return -EINVAL;
1156 	}
1157 
1158 	/* Wait for firmware load to finish. */
1159 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1160 	if (status != DMUB_STATUS_OK)
1161 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1162 
1163 	/* Init DMCU and ABM if available. */
1164 	if (dmcu && abm) {
1165 		dmcu->funcs->dmcu_init(dmcu);
1166 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1167 	}
1168 
1169 	if (!adev->dm.dc->ctx->dmub_srv)
1170 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1171 	if (!adev->dm.dc->ctx->dmub_srv) {
1172 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1173 		return -ENOMEM;
1174 	}
1175 
1176 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1177 		 adev->dm.dmcub_fw_version);
1178 
1179 	return 0;
1180 }
1181 
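/*
 * On resume, wait for an already-initialized DMUB to finish its auto-load;
 * otherwise perform the full dm_dmub_hw_init() sequence.
 */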
1182 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1183 {
1184 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1185 	enum dmub_status status;
1186 	bool init;
1187 
1188 	if (!dmub_srv) {
1189 		/* DMUB isn't supported on the ASIC. */
1190 		return;
1191 	}
1192 
1193 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1194 	if (status != DMUB_STATUS_OK)
1195 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1196 
1197 	if (status == DMUB_STATUS_OK && init) {
1198 		/* Wait for firmware load to finish. */
1199 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1200 		if (status != DMUB_STATUS_OK)
1201 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1202 	} else {
1203 		/* Perform the full hardware initialization. */
1204 		dm_dmub_hw_init(adev);
1205 	}
1206 }
1207 
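/*
 * Fill a dc_phy_addr_space_config from the GMC configuration: system
 * aperture, AGP window, framebuffer addresses and GART page table base.
 */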
1208 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1209 {
1210 	uint64_t pt_base;
1211 	uint32_t logical_addr_low;
1212 	uint32_t logical_addr_high;
1213 	uint32_t agp_base, agp_bot, agp_top;
1214 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1215 
1216 	memset(pa_config, 0, sizeof(*pa_config));
1217 
1218 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1219 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1220 
1221 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1222 		/*
1223 		 * Raven2 has a HW issue that prevents it from using the vram that
1224 		 * is beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is to
1225 		 * increase the system aperture high address (add 1) to get rid of
1226 		 * the VM fault and hardware hang.
1227 		 */
1228 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1229 	else
1230 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1231 
1232 	agp_base = 0;
1233 	agp_bot = adev->gmc.agp_start >> 24;
1234 	agp_top = adev->gmc.agp_end >> 24;
1235 
1236 
1237 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1238 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1239 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1240 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1241 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1242 	page_table_base.low_part = lower_32_bits(pt_base);
1243 
1244 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1245 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1246 
1247 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1248 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1249 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1250 
1251 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1252 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1253 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1254 
1255 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1256 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1257 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1258 
1259 	pa_config->is_hvm_enabled = 0;
1260 
1261 }
1262 
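/*
 * Deferred vblank enable/disable work: maintains the active vblank IRQ
 * count, toggles idle optimizations (MALL) accordingly and enables or
 * disables PSR based on the vblank requirements from the OS.
 */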
1263 static void vblank_control_worker(struct work_struct *work)
1264 {
1265 	struct vblank_control_work *vblank_work =
1266 		container_of(work, struct vblank_control_work, work);
1267 	struct amdgpu_display_manager *dm = vblank_work->dm;
1268 
1269 	mutex_lock(&dm->dc_lock);
1270 
1271 	if (vblank_work->enable)
1272 		dm->active_vblank_irq_count++;
1273 	else if (dm->active_vblank_irq_count)
1274 		dm->active_vblank_irq_count--;
1275 
1276 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1277 
1278 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1279 
1280 	/* Control PSR based on vblank requirements from OS */
1281 	if (vblank_work->stream && vblank_work->stream->link) {
1282 		if (vblank_work->enable) {
1283 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1284 				amdgpu_dm_psr_disable(vblank_work->stream);
1285 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1286 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1287 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1288 			amdgpu_dm_psr_enable(vblank_work->stream);
1289 		}
1290 	}
1291 
1292 	mutex_unlock(&dm->dc_lock);
1293 
1294 	dc_stream_release(vblank_work->stream);
1295 
1296 	kfree(vblank_work);
1297 }
1298 
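/*
 * Offloaded HPD RX interrupt work: re-detects the sink and handles DP
 * automated test requests or link loss outside of interrupt context.
 */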
1299 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1300 {
1301 	struct hpd_rx_irq_offload_work *offload_work;
1302 	struct amdgpu_dm_connector *aconnector;
1303 	struct dc_link *dc_link;
1304 	struct amdgpu_device *adev;
1305 	enum dc_connection_type new_connection_type = dc_connection_none;
1306 	unsigned long flags;
1307 
1308 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1309 	aconnector = offload_work->offload_wq->aconnector;
1310 
1311 	if (!aconnector) {
1312 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1313 		goto skip;
1314 	}
1315 
1316 	adev = drm_to_adev(aconnector->base.dev);
1317 	dc_link = aconnector->dc_link;
1318 
1319 	mutex_lock(&aconnector->hpd_lock);
1320 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1321 		DRM_ERROR("KMS: Failed to detect connector\n");
1322 	mutex_unlock(&aconnector->hpd_lock);
1323 
1324 	if (new_connection_type == dc_connection_none)
1325 		goto skip;
1326 
1327 	if (amdgpu_in_reset(adev))
1328 		goto skip;
1329 
1330 	mutex_lock(&adev->dm.dc_lock);
1331 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1332 		dc_link_dp_handle_automated_test(dc_link);
1333 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1334 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1335 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1336 		dc_link_dp_handle_link_loss(dc_link);
1337 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1338 		offload_work->offload_wq->is_handling_link_loss = false;
1339 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1340 	}
1341 	mutex_unlock(&adev->dm.dc_lock);
1342 
1343 skip:
1344 	kfree(offload_work);
1345 
1346 }
1347 
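/* Allocate one single-threaded HPD RX offload workqueue per link. */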
1348 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1349 {
1350 	int max_caps = dc->caps.max_links;
1351 	int i = 0;
1352 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1353 
1354 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1355 
1356 	if (!hpd_rx_offload_wq)
1357 		return NULL;
1358 
1359 
1360 	for (i = 0; i < max_caps; i++) {
1361 		hpd_rx_offload_wq[i].wq =
1362 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1363 
1364 		if (hpd_rx_offload_wq[i].wq == NULL) {
1365 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1366 			return NULL;
1367 		}
1368 
1369 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1370 	}
1371 
1372 	return hpd_rx_offload_wq;
1373 }
1374 
1375 struct amdgpu_stutter_quirk {
1376 	u16 chip_vendor;
1377 	u16 chip_device;
1378 	u16 subsys_vendor;
1379 	u16 subsys_device;
1380 	u8 revision;
1381 };
1382 
1383 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1384 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1385 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1386 	{ 0, 0, 0, 0, 0 },
1387 };
1388 
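/* Match the PCI and subsystem IDs against the stutter quirk list above. */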
1389 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1390 {
1391 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1392 
1393 	while (p && p->chip_device != 0) {
1394 		if (pdev->vendor == p->chip_vendor &&
1395 		    pdev->device == p->chip_device &&
1396 		    pdev->subsystem_vendor == p->subsys_vendor &&
1397 		    pdev->subsystem_device == p->subsys_device &&
1398 		    pdev->revision == p->revision) {
1399 			return true;
1400 		}
1401 		++p;
1402 	}
1403 	return false;
1404 }
1405 
1406 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1407 	{
1408 		.matches = {
1409 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1410 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1411 		},
1412 	},
1413 	{
1414 		.matches = {
1415 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1416 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1417 		},
1418 	},
1419 	{
1420 		.matches = {
1421 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1422 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1423 		},
1424 	},
1425 	{}
1426 };
1427 
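/* Apply DMI-based quirks; currently only the AUX HPD disconnect quirk. */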
1428 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1429 {
1430 	const struct dmi_system_id *dmi_id;
1431 
1432 	dm->aux_hpd_discon_quirk = false;
1433 
1434 	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1435 	if (dmi_id) {
1436 		dm->aux_hpd_discon_quirk = true;
1437 		DRM_INFO("aux_hpd_discon_quirk attached\n");
1438 	}
1439 }
1440 
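/*
 * amdgpu_dm_init - top-level DM bring-up: creates the DC instance, brings up
 * DMUB, the freesync and HDCP modules, the HPD/vblank workqueues and finally
 * the DRM mode setting structures.
 */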
1441 static int amdgpu_dm_init(struct amdgpu_device *adev)
1442 {
1443 	struct dc_init_data init_data;
1444 #ifdef CONFIG_DRM_AMD_DC_HDCP
1445 	struct dc_callback_init init_params;
1446 #endif
1447 	int r;
1448 
1449 	adev->dm.ddev = adev_to_drm(adev);
1450 	adev->dm.adev = adev;
1451 
1452 	/* Zero all the fields */
1453 	memset(&init_data, 0, sizeof(init_data));
1454 #ifdef CONFIG_DRM_AMD_DC_HDCP
1455 	memset(&init_params, 0, sizeof(init_params));
1456 #endif
1457 
1458 	mutex_init(&adev->dm.dc_lock);
1459 	mutex_init(&adev->dm.audio_lock);
1460 	spin_lock_init(&adev->dm.vblank_lock);
1461 
1462 	if (amdgpu_dm_irq_init(adev)) {
1463 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1464 		goto error;
1465 	}
1466 
1467 	init_data.asic_id.chip_family = adev->family;
1468 
1469 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1470 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1471 	init_data.asic_id.chip_id = adev->pdev->device;
1472 
1473 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1474 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1475 	init_data.asic_id.atombios_base_address =
1476 		adev->mode_info.atom_context->bios;
1477 
1478 	init_data.driver = adev;
1479 
1480 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1481 
1482 	if (!adev->dm.cgs_device) {
1483 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1484 		goto error;
1485 	}
1486 
1487 	init_data.cgs_device = adev->dm.cgs_device;
1488 
1489 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1490 
1491 	switch (adev->ip_versions[DCE_HWIP][0]) {
1492 	case IP_VERSION(2, 1, 0):
1493 		switch (adev->dm.dmcub_fw_version) {
1494 		case 0: /* development */
1495 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1496 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1497 			init_data.flags.disable_dmcu = false;
1498 			break;
1499 		default:
1500 			init_data.flags.disable_dmcu = true;
1501 		}
1502 		break;
1503 	case IP_VERSION(2, 0, 3):
1504 		init_data.flags.disable_dmcu = true;
1505 		break;
1506 	default:
1507 		break;
1508 	}
1509 
1510 	switch (adev->asic_type) {
1511 	case CHIP_CARRIZO:
1512 	case CHIP_STONEY:
1513 		init_data.flags.gpu_vm_support = true;
1514 		break;
1515 	default:
1516 		switch (adev->ip_versions[DCE_HWIP][0]) {
1517 		case IP_VERSION(1, 0, 0):
1518 		case IP_VERSION(1, 0, 1):
1519 			/* enable S/G on PCO and RV2 */
1520 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1521 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1522 				init_data.flags.gpu_vm_support = true;
1523 			break;
1524 		case IP_VERSION(2, 1, 0):
1525 		case IP_VERSION(3, 0, 1):
1526 		case IP_VERSION(3, 1, 2):
1527 		case IP_VERSION(3, 1, 3):
1528 		case IP_VERSION(3, 1, 5):
1529 		case IP_VERSION(3, 1, 6):
1530 			init_data.flags.gpu_vm_support = true;
1531 			break;
1532 		default:
1533 			break;
1534 		}
1535 		break;
1536 	}
1537 
1538 	if (init_data.flags.gpu_vm_support)
1539 		adev->mode_info.gpu_vm_support = true;
1540 
1541 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1542 		init_data.flags.fbc_support = true;
1543 
1544 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1545 		init_data.flags.multi_mon_pp_mclk_switch = true;
1546 
1547 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1548 		init_data.flags.disable_fractional_pwm = true;
1549 
1550 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1551 		init_data.flags.edp_no_power_sequencing = true;
1552 
1553 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1554 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1555 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1556 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1557 
1558 	init_data.flags.seamless_boot_edp_requested = false;
1559 
1560 	if (check_seamless_boot_capability(adev)) {
1561 		init_data.flags.seamless_boot_edp_requested = true;
1562 		init_data.flags.allow_seamless_boot_optimization = true;
1563 		DRM_INFO("Seamless boot condition check passed\n");
1564 	}
1565 
1566 	INIT_LIST_HEAD(&adev->dm.da_list);
1567 
1568 	retrieve_dmi_info(&adev->dm);
1569 
1570 	/* Display Core create. */
1571 	adev->dm.dc = dc_create(&init_data);
1572 
1573 	if (adev->dm.dc) {
1574 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1575 	} else {
1576 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1577 		goto error;
1578 	}
1579 
1580 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1581 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1582 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1583 	}
1584 
1585 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1586 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1587 	if (dm_should_disable_stutter(adev->pdev))
1588 		adev->dm.dc->debug.disable_stutter = true;
1589 
1590 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1591 		adev->dm.dc->debug.disable_stutter = true;
1592 
1593 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1594 		adev->dm.dc->debug.disable_dsc = true;
1595 		adev->dm.dc->debug.disable_dsc_edp = true;
1596 	}
1597 
1598 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1599 		adev->dm.dc->debug.disable_clock_gate = true;
1600 
1601 	r = dm_dmub_hw_init(adev);
1602 	if (r) {
1603 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1604 		goto error;
1605 	}
1606 
1607 	dc_hardware_init(adev->dm.dc);
1608 
1609 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1610 	if (!adev->dm.hpd_rx_offload_wq) {
1611 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1612 		goto error;
1613 	}
1614 
1615 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1616 		struct dc_phy_addr_space_config pa_config;
1617 
1618 		mmhub_read_system_context(adev, &pa_config);
1619 
1620 		// Call the DC init_memory func
1621 		dc_setup_system_context(adev->dm.dc, &pa_config);
1622 	}
1623 
1624 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1625 	if (!adev->dm.freesync_module) {
1626 		DRM_ERROR(
1627 		"amdgpu: failed to initialize freesync_module.\n");
1628 	} else
1629 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1630 				adev->dm.freesync_module);
1631 
1632 	amdgpu_dm_init_color_mod();
1633 
1634 	if (adev->dm.dc->caps.max_links > 0) {
1635 		adev->dm.vblank_control_workqueue =
1636 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1637 		if (!adev->dm.vblank_control_workqueue)
1638 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1639 	}
1640 
1641 #ifdef CONFIG_DRM_AMD_DC_HDCP
1642 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1643 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1644 
1645 		if (!adev->dm.hdcp_workqueue)
1646 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1647 		else
1648 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1649 
1650 		dc_init_callbacks(adev->dm.dc, &init_params);
1651 	}
1652 #endif
1653 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1654 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1655 #endif
1656 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1657 		init_completion(&adev->dm.dmub_aux_transfer_done);
1658 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1659 		if (!adev->dm.dmub_notify) {
1660 			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1661 			goto error;
1662 		}
1663 
1664 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1665 		if (!adev->dm.delayed_hpd_wq) {
1666 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1667 			goto error;
1668 		}
1669 
1670 		amdgpu_dm_outbox_init(adev);
1671 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1672 			dmub_aux_setconfig_callback, false)) {
1673 			DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1674 			goto error;
1675 		}
1676 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1677 			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1678 			goto error;
1679 		}
1680 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1681 			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1682 			goto error;
1683 		}
1684 	}
1685 
1686 	if (amdgpu_dm_initialize_drm_device(adev)) {
1687 		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1689 		goto error;
1690 	}
1691 
1692 	/* create fake encoders for MST */
1693 	dm_dp_create_fake_mst_encoders(adev);
1694 
1695 	/* TODO: Add_display_info? */
1696 
1697 	/* TODO use dynamic cursor width */
1698 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1699 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1700 
1701 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1702 		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
1704 		goto error;
1705 	}
1706 
1708 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1709 
1710 	return 0;
1711 error:
1712 	amdgpu_dm_fini(adev);
1713 
1714 	return -EINVAL;
1715 }
1716 
1717 static int amdgpu_dm_early_fini(void *handle)
1718 {
1719 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1720 
1721 	amdgpu_dm_audio_fini(adev);
1722 
1723 	return 0;
1724 }
1725 
1726 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1727 {
1728 	int i;
1729 
1730 	if (adev->dm.vblank_control_workqueue) {
1731 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1732 		adev->dm.vblank_control_workqueue = NULL;
1733 	}
1734 
1735 	for (i = 0; i < adev->dm.display_indexes_num; i++)
1736 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1738 
1739 	amdgpu_dm_destroy_drm_device(&adev->dm);
1740 
1741 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1742 	if (adev->dm.crc_rd_wrk) {
1743 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1744 		kfree(adev->dm.crc_rd_wrk);
1745 		adev->dm.crc_rd_wrk = NULL;
1746 	}
1747 #endif
1748 #ifdef CONFIG_DRM_AMD_DC_HDCP
1749 	if (adev->dm.hdcp_workqueue) {
1750 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1751 		adev->dm.hdcp_workqueue = NULL;
1752 	}
1753 
1754 	if (adev->dm.dc)
1755 		dc_deinit_callbacks(adev->dm.dc);
1756 #endif
1757 
1758 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1759 
1760 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1761 		kfree(adev->dm.dmub_notify);
1762 		adev->dm.dmub_notify = NULL;
1763 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1764 		adev->dm.delayed_hpd_wq = NULL;
1765 	}
1766 
1767 	if (adev->dm.dmub_bo)
1768 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1769 				      &adev->dm.dmub_bo_gpu_addr,
1770 				      &adev->dm.dmub_bo_cpu_addr);
1771 
1772 	if (adev->dm.hpd_rx_offload_wq) {
1773 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1774 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1775 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1776 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1777 			}
1778 		}
1779 
1780 		kfree(adev->dm.hpd_rx_offload_wq);
1781 		adev->dm.hpd_rx_offload_wq = NULL;
1782 	}
1783 
1784 	/* DC Destroy TODO: Replace destroy DAL */
1785 	if (adev->dm.dc)
1786 		dc_destroy(&adev->dm.dc);
1787 	/*
1788 	 * TODO: pageflip, vblank interrupt
1789 	 *
1790 	 * amdgpu_dm_irq_fini(adev);
1791 	 */
1792 
1793 	if (adev->dm.cgs_device) {
1794 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1795 		adev->dm.cgs_device = NULL;
1796 	}
1797 	if (adev->dm.freesync_module) {
1798 		mod_freesync_destroy(adev->dm.freesync_module);
1799 		adev->dm.freesync_module = NULL;
1800 	}
1801 
1802 	mutex_destroy(&adev->dm.audio_lock);
1803 	mutex_destroy(&adev->dm.dc_lock);
1806 }
1807 
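/*
 * Request and validate the DMCU firmware where one is needed. ASICs that do
 * not ship a separately loaded DMCU (or that use DMCUB instead) return 0.
 */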
1808 static int load_dmcu_fw(struct amdgpu_device *adev)
1809 {
1810 	const char *fw_name_dmcu = NULL;
1811 	int r;
1812 	const struct dmcu_firmware_header_v1_0 *hdr;
1813 
1814 	switch (adev->asic_type) {
1815 #if defined(CONFIG_DRM_AMD_DC_SI)
1816 	case CHIP_TAHITI:
1817 	case CHIP_PITCAIRN:
1818 	case CHIP_VERDE:
1819 	case CHIP_OLAND:
1820 #endif
1821 	case CHIP_BONAIRE:
1822 	case CHIP_HAWAII:
1823 	case CHIP_KAVERI:
1824 	case CHIP_KABINI:
1825 	case CHIP_MULLINS:
1826 	case CHIP_TONGA:
1827 	case CHIP_FIJI:
1828 	case CHIP_CARRIZO:
1829 	case CHIP_STONEY:
1830 	case CHIP_POLARIS11:
1831 	case CHIP_POLARIS10:
1832 	case CHIP_POLARIS12:
1833 	case CHIP_VEGAM:
1834 	case CHIP_VEGA10:
1835 	case CHIP_VEGA12:
1836 	case CHIP_VEGA20:
1837 		return 0;
1838 	case CHIP_NAVI12:
1839 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1840 		break;
1841 	case CHIP_RAVEN:
1842 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1843 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1844 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1845 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1846 		else
1847 			return 0;
1848 		break;
1849 	default:
1850 		switch (adev->ip_versions[DCE_HWIP][0]) {
1851 		case IP_VERSION(2, 0, 2):
1852 		case IP_VERSION(2, 0, 3):
1853 		case IP_VERSION(2, 0, 0):
1854 		case IP_VERSION(2, 1, 0):
1855 		case IP_VERSION(3, 0, 0):
1856 		case IP_VERSION(3, 0, 2):
1857 		case IP_VERSION(3, 0, 3):
1858 		case IP_VERSION(3, 0, 1):
1859 		case IP_VERSION(3, 1, 2):
1860 		case IP_VERSION(3, 1, 3):
1861 		case IP_VERSION(3, 1, 5):
1862 		case IP_VERSION(3, 1, 6):
1863 			return 0;
1864 		default:
1865 			break;
1866 		}
1867 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1868 		return -EINVAL;
1869 	}
1870 
1871 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1872 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1873 		return 0;
1874 	}
1875 
1876 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1877 	if (r == -ENOENT) {
1878 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1879 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1880 		adev->dm.fw_dmcu = NULL;
1881 		return 0;
1882 	}
1883 	if (r) {
1884 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1885 			fw_name_dmcu);
1886 		return r;
1887 	}
1888 
1889 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1890 	if (r) {
1891 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1892 			fw_name_dmcu);
1893 		release_firmware(adev->dm.fw_dmcu);
1894 		adev->dm.fw_dmcu = NULL;
1895 		return r;
1896 	}
1897 
1898 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1899 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1900 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1901 	adev->firmware.fw_size +=
1902 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1903 
1904 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1905 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1906 	adev->firmware.fw_size +=
1907 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1908 
1909 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1910 
1911 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1912 
1913 	return 0;
1914 }
1915 
1916 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1917 {
1918 	struct amdgpu_device *adev = ctx;
1919 
1920 	return dm_read_reg(adev->dm.dc->ctx, address);
1921 }
1922 
1923 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1924 				     uint32_t value)
1925 {
1926 	struct amdgpu_device *adev = ctx;
1927 
1928 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1929 }
1930 
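/*
 * Pick the DMUB firmware for the detected DCN IP version, create the DMUB
 * service, and lay out its firmware/data regions in a VRAM buffer.
 */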
1931 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1932 {
1933 	struct dmub_srv_create_params create_params;
1934 	struct dmub_srv_region_params region_params;
1935 	struct dmub_srv_region_info region_info;
1936 	struct dmub_srv_fb_params fb_params;
1937 	struct dmub_srv_fb_info *fb_info;
1938 	struct dmub_srv *dmub_srv;
1939 	const struct dmcub_firmware_header_v1_0 *hdr;
1940 	const char *fw_name_dmub;
1941 	enum dmub_asic dmub_asic;
1942 	enum dmub_status status;
1943 	int r;
1944 
1945 	switch (adev->ip_versions[DCE_HWIP][0]) {
1946 	case IP_VERSION(2, 1, 0):
1947 		dmub_asic = DMUB_ASIC_DCN21;
1948 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1949 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1950 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1951 		break;
1952 	case IP_VERSION(3, 0, 0):
1953 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1954 			dmub_asic = DMUB_ASIC_DCN30;
1955 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1956 		} else {
1957 			dmub_asic = DMUB_ASIC_DCN30;
1958 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1959 		}
1960 		break;
1961 	case IP_VERSION(3, 0, 1):
1962 		dmub_asic = DMUB_ASIC_DCN301;
1963 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1964 		break;
1965 	case IP_VERSION(3, 0, 2):
1966 		dmub_asic = DMUB_ASIC_DCN302;
1967 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1968 		break;
1969 	case IP_VERSION(3, 0, 3):
1970 		dmub_asic = DMUB_ASIC_DCN303;
1971 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1972 		break;
1973 	case IP_VERSION(3, 1, 2):
1974 	case IP_VERSION(3, 1, 3):
1975 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1976 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1977 		break;
1978 	case IP_VERSION(3, 1, 5):
1979 		dmub_asic = DMUB_ASIC_DCN315;
1980 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1981 		break;
1982 	case IP_VERSION(3, 1, 6):
1983 		dmub_asic = DMUB_ASIC_DCN316;
1984 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1985 		break;
1986 	default:
1987 		/* ASIC doesn't support DMUB. */
1988 		return 0;
1989 	}
1990 
1991 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1992 	if (r) {
1993 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1994 		return 0;
1995 	}
1996 
1997 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1998 	if (r) {
1999 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2000 		return 0;
2001 	}
2002 
2003 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2004 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2005 
2006 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2007 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2008 			AMDGPU_UCODE_ID_DMCUB;
2009 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2010 			adev->dm.dmub_fw;
2011 		adev->firmware.fw_size +=
2012 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2013 
2014 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2015 			 adev->dm.dmcub_fw_version);
2016 	}
2017 
2019 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2020 	dmub_srv = adev->dm.dmub_srv;
2021 
2022 	if (!dmub_srv) {
2023 		DRM_ERROR("Failed to allocate DMUB service!\n");
2024 		return -ENOMEM;
2025 	}
2026 
2027 	memset(&create_params, 0, sizeof(create_params));
2028 	create_params.user_ctx = adev;
2029 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2030 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2031 	create_params.asic = dmub_asic;
2032 
2033 	/* Create the DMUB service. */
2034 	status = dmub_srv_create(dmub_srv, &create_params);
2035 	if (status != DMUB_STATUS_OK) {
2036 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2037 		return -EINVAL;
2038 	}
2039 
2040 	/* Calculate the size of all the regions for the DMUB service. */
2041 	memset(&region_params, 0, sizeof(region_params));
2042 
2043 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2044 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2045 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2046 	region_params.vbios_size = adev->bios_size;
2047 	region_params.fw_bss_data = region_params.bss_data_size ?
2048 		adev->dm.dmub_fw->data +
2049 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2050 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2051 	region_params.fw_inst_const =
2052 		adev->dm.dmub_fw->data +
2053 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2054 		PSP_HEADER_BYTES;
2055 
2056 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2057 					   &region_info);
2058 
2059 	if (status != DMUB_STATUS_OK) {
2060 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2061 		return -EINVAL;
2062 	}
2063 
2064 	/*
2065 	 * Allocate a framebuffer based on the total size of all the regions.
2066 	 * TODO: Move this into GART.
2067 	 */
2068 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2069 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2070 				    &adev->dm.dmub_bo_gpu_addr,
2071 				    &adev->dm.dmub_bo_cpu_addr);
2072 	if (r)
2073 		return r;
2074 
2075 	/* Rebase the regions on the framebuffer address. */
2076 	memset(&fb_params, 0, sizeof(fb_params));
2077 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2078 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2079 	fb_params.region_info = &region_info;
2080 
2081 	adev->dm.dmub_fb_info =
2082 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2083 	fb_info = adev->dm.dmub_fb_info;
2084 
2085 	if (!fb_info) {
2086 		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
2088 		return -ENOMEM;
2089 	}
2090 
2091 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2092 	if (status != DMUB_STATUS_OK) {
2093 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2094 		return -EINVAL;
2095 	}
2096 
2097 	return 0;
2098 }
2099 
2100 static int dm_sw_init(void *handle)
2101 {
2102 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2103 	int r;
2104 
2105 	r = dm_dmub_sw_init(adev);
2106 	if (r)
2107 		return r;
2108 
2109 	return load_dmcu_fw(adev);
2110 }
2111 
2112 static int dm_sw_fini(void *handle)
2113 {
2114 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2115 
2116 	kfree(adev->dm.dmub_fb_info);
2117 	adev->dm.dmub_fb_info = NULL;
2118 
2119 	if (adev->dm.dmub_srv) {
2120 		dmub_srv_destroy(adev->dm.dmub_srv);
2121 		adev->dm.dmub_srv = NULL;
2122 	}
2123 
2124 	release_firmware(adev->dm.dmub_fw);
2125 	adev->dm.dmub_fw = NULL;
2126 
2127 	release_firmware(adev->dm.fw_dmcu);
2128 	adev->dm.fw_dmcu = NULL;
2129 
2130 	return 0;
2131 }
2132 
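/*
 * Start topology management on every connector whose link was detected as an
 * MST branch; on failure the link is downgraded to a single connection.
 */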
2133 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2134 {
2135 	struct amdgpu_dm_connector *aconnector;
2136 	struct drm_connector *connector;
2137 	struct drm_connector_list_iter iter;
2138 	int ret = 0;
2139 
2140 	drm_connector_list_iter_begin(dev, &iter);
2141 	drm_for_each_connector_iter(connector, &iter) {
2142 		aconnector = to_amdgpu_dm_connector(connector);
2143 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2144 		    aconnector->mst_mgr.aux) {
2145 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2146 					 aconnector,
2147 					 aconnector->base.base.id);
2148 
2149 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2150 			if (ret < 0) {
2151 				DRM_ERROR("DM_MST: Failed to start MST\n");
2152 				aconnector->dc_link->type =
2153 					dc_connection_single;
2154 				break;
2155 			}
2156 		}
2157 	}
2158 	drm_connector_list_iter_end(&iter);
2159 
2160 	return ret;
2161 }
2162 
2163 static int dm_late_init(void *handle)
2164 {
2165 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2166 
2167 	struct dmcu_iram_parameters params;
2168 	unsigned int linear_lut[16];
2169 	int i;
2170 	struct dmcu *dmcu = NULL;
2171 
2172 	dmcu = adev->dm.dc->res_pool->dmcu;
2173 
2174 	for (i = 0; i < 16; i++)
2175 		linear_lut[i] = 0xFFFF * i / 15;
2176 
2177 	params.set = 0;
2178 	params.backlight_ramping_override = false;
2179 	params.backlight_ramping_start = 0xCCCC;
2180 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2181 	params.backlight_lut_array_size = 16;
2182 	params.backlight_lut_array = linear_lut;
2183 
2184 	/* Min backlight level after ABM reduction. Don't allow below 1%:
2185 	 * 0xFFFF x 0.01 = 0x28F
2186 	 */
2187 	params.min_abm_backlight = 0x28F;
2188 	/* In the case where abm is implemented on dmcub,
2189 	 * dmcu object will be null.
2190 	 * ABM 2.4 and up are implemented on dmcub.
2191 	 */
2192 	if (dmcu) {
2193 		if (!dmcu_load_iram(dmcu, params))
2194 			return -EINVAL;
2195 	} else if (adev->dm.dc->ctx->dmub_srv) {
2196 		struct dc_link *edp_links[MAX_NUM_EDP];
2197 		int edp_num;
2198 
2199 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2200 		for (i = 0; i < edp_num; i++) {
2201 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2202 				return -EINVAL;
2203 		}
2204 	}
2205 
2206 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2207 }
2208 
2209 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2210 {
2211 	struct amdgpu_dm_connector *aconnector;
2212 	struct drm_connector *connector;
2213 	struct drm_connector_list_iter iter;
2214 	struct drm_dp_mst_topology_mgr *mgr;
2215 	int ret;
2216 	bool need_hotplug = false;
2217 
2218 	drm_connector_list_iter_begin(dev, &iter);
2219 	drm_for_each_connector_iter(connector, &iter) {
2220 		aconnector = to_amdgpu_dm_connector(connector);
2221 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2222 		    aconnector->mst_port)
2223 			continue;
2224 
2225 		mgr = &aconnector->mst_mgr;
2226 
2227 		if (suspend) {
2228 			drm_dp_mst_topology_mgr_suspend(mgr);
2229 		} else {
2230 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2231 			if (ret < 0) {
2232 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2233 				need_hotplug = true;
2234 			}
2235 		}
2236 	}
2237 	drm_connector_list_iter_end(&iter);
2238 
2239 	if (need_hotplug)
2240 		drm_kms_helper_hotplug_event(dev);
2241 }
2242 
2243 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2244 {
2245 	int ret = 0;
2246 
2247 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2248 	 * on the Windows driver dc implementation.
2249 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2250 	 * should be passed to smu during boot up and resume from s3.
2251 	 * boot up: dc calculates dcn watermark clock settings within dc_create,
2252 	 * dcn20_resource_construct
2253 	 * then calls the pplib functions below to pass the settings to smu:
2254 	 * smu_set_watermarks_for_clock_ranges
2255 	 * smu_set_watermarks_table
2256 	 * navi10_set_watermarks_table
2257 	 * smu_write_watermarks_table
2258 	 *
2259 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
2260 	 * dc has implemented a different flow for the Windows driver:
2261 	 * dc_hardware_init / dc_set_power_state
2262 	 * dcn10_init_hw
2263 	 * notify_wm_ranges
2264 	 * set_wm_ranges
2265 	 * -- Linux
2266 	 * smu_set_watermarks_for_clock_ranges
2267 	 * renoir_set_watermarks_table
2268 	 * smu_write_watermarks_table
2269 	 *
2270 	 * For Linux,
2271 	 * dc_hardware_init -> amdgpu_dm_init
2272 	 * dc_set_power_state --> dm_resume
2273 	 *
2274 	 * Therefore, this function applies to navi10/12/14 but not Renoir.
2275 	 */
2277 	switch (adev->ip_versions[DCE_HWIP][0]) {
2278 	case IP_VERSION(2, 0, 2):
2279 	case IP_VERSION(2, 0, 0):
2280 		break;
2281 	default:
2282 		return 0;
2283 	}
2284 
2285 	ret = amdgpu_dpm_write_watermarks_table(adev);
2286 	if (ret) {
2287 		DRM_ERROR("Failed to update WMTABLE!\n");
2288 		return ret;
2289 	}
2290 
2291 	return 0;
2292 }
2293 
2294 /**
2295  * dm_hw_init() - Initialize DC device
2296  * @handle: The base driver device containing the amdgpu_dm device.
2297  *
2298  * Initialize the &struct amdgpu_display_manager device. This involves calling
2299  * the initializers of each DM component, then populating the struct with them.
2300  *
2301  * Although the function implies hardware initialization, both hardware and
2302  * software are initialized here. Splitting them out to their relevant init
2303  * hooks is a future TODO item.
2304  *
2305  * Some notable things that are initialized here:
2306  *
2307  * - Display Core, both software and hardware
2308  * - DC modules that we need (freesync and color management)
2309  * - DRM software states
2310  * - Interrupt sources and handlers
2311  * - Vblank support
2312  * - Debug FS entries, if enabled
2313  */
2314 static int dm_hw_init(void *handle)
2315 {
2316 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2317 	/* Create DAL display manager */
2318 	amdgpu_dm_init(adev);
2319 	amdgpu_dm_hpd_init(adev);
2320 
2321 	return 0;
2322 }
2323 
2324 /**
2325  * dm_hw_fini() - Teardown DC device
2326  * @handle: The base driver device containing the amdgpu_dm device.
2327  *
2328  * Teardown components within &struct amdgpu_display_manager that require
2329  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2330  * were loaded. Also flush IRQ workqueues and disable them.
2331  */
2332 static int dm_hw_fini(void *handle)
2333 {
2334 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2335 
2336 	amdgpu_dm_hpd_fini(adev);
2337 
2338 	amdgpu_dm_irq_fini(adev);
2339 	amdgpu_dm_fini(adev);
2340 	return 0;
2341 }
2342 
2344 static int dm_enable_vblank(struct drm_crtc *crtc);
2345 static void dm_disable_vblank(struct drm_crtc *crtc);
2346 
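/*
 * Enable or disable the pageflip and vblank interrupts for every stream in
 * the given DC state; used to quiesce and restore display IRQs across a GPU
 * reset.
 */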
2347 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2348 				 struct dc_state *state, bool enable)
2349 {
2350 	enum dc_irq_source irq_source;
2351 	struct amdgpu_crtc *acrtc;
2352 	int rc = -EBUSY;
2353 	int i = 0;
2354 
2355 	for (i = 0; i < state->stream_count; i++) {
2356 		acrtc = get_crtc_by_otg_inst(
2357 				adev, state->stream_status[i].primary_otg_inst);
2358 
2359 		if (acrtc && state->stream_status[i].plane_count != 0) {
2360 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2361 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2362 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2363 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2364 			if (rc)
2365 				DRM_WARN("Failed to %s pflip interrupts\n",
2366 					 enable ? "enable" : "disable");
2367 
2368 			if (enable) {
2369 				rc = dm_enable_vblank(&acrtc->base);
2370 				if (rc)
2371 					DRM_WARN("Failed to enable vblank interrupts\n");
2372 			} else {
2373 				dm_disable_vblank(&acrtc->base);
2374 			}
2375 
2376 		}
2377 	}
2378 
2379 }
2380 
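/*
 * Commit an empty configuration: copy the current DC state, strip all planes
 * and streams from the copy, and commit it so the hardware is left idle.
 */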
2381 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2382 {
2383 	struct dc_state *context = NULL;
2384 	enum dc_status res = DC_ERROR_UNEXPECTED;
2385 	int i;
2386 	struct dc_stream_state *del_streams[MAX_PIPES];
2387 	int del_streams_count = 0;
2388 
2389 	memset(del_streams, 0, sizeof(del_streams));
2390 
2391 	context = dc_create_state(dc);
2392 	if (context == NULL)
2393 		goto context_alloc_fail;
2394 
2395 	dc_resource_state_copy_construct_current(dc, context);
2396 
2397 	/* First remove from context all streams */
2398 	for (i = 0; i < context->stream_count; i++) {
2399 		struct dc_stream_state *stream = context->streams[i];
2400 
2401 		del_streams[del_streams_count++] = stream;
2402 	}
2403 
2404 	/* Remove all planes for removed streams and then remove the streams */
2405 	for (i = 0; i < del_streams_count; i++) {
2406 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2407 			res = DC_FAIL_DETACH_SURFACES;
2408 			goto fail;
2409 		}
2410 
2411 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2412 		if (res != DC_OK)
2413 			goto fail;
2414 	}
2415 
2416 	res = dc_commit_state(dc, context);
2417 
2418 fail:
2419 	dc_release_state(context);
2420 
2421 context_alloc_fail:
2422 	return res;
2423 }
2424 
2425 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2426 {
2427 	int i;
2428 
2429 	if (dm->hpd_rx_offload_wq) {
2430 		for (i = 0; i < dm->dc->caps.max_links; i++)
2431 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2432 	}
2433 }
2434 
2435 static int dm_suspend(void *handle)
2436 {
2437 	struct amdgpu_device *adev = handle;
2438 	struct amdgpu_display_manager *dm = &adev->dm;
2439 	int ret = 0;
2440 
2441 	if (amdgpu_in_reset(adev)) {
2442 		mutex_lock(&dm->dc_lock);
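		/* dc_lock stays held across the GPU reset; dm_resume() releases it. */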
2443 
2444 		dc_allow_idle_optimizations(adev->dm.dc, false);
2445 
2446 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2447 
2448 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2449 
2450 		amdgpu_dm_commit_zero_streams(dm->dc);
2451 
2452 		amdgpu_dm_irq_suspend(adev);
2453 
2454 		hpd_rx_irq_work_suspend(dm);
2455 
2456 		return ret;
2457 	}
2458 
2459 	WARN_ON(adev->dm.cached_state);
2460 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2461 
2462 	s3_handle_mst(adev_to_drm(adev), true);
2463 
2464 	amdgpu_dm_irq_suspend(adev);
2465 
2466 	hpd_rx_irq_work_suspend(dm);
2467 
2468 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2469 
2470 	return 0;
2471 }
2472 
2473 struct amdgpu_dm_connector *
2474 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2475 					     struct drm_crtc *crtc)
2476 {
2477 	uint32_t i;
2478 	struct drm_connector_state *new_con_state;
2479 	struct drm_connector *connector;
2480 	struct drm_crtc *crtc_from_state;
2481 
2482 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2483 		crtc_from_state = new_con_state->crtc;
2484 
2485 		if (crtc_from_state == crtc)
2486 			return to_amdgpu_dm_connector(connector);
2487 	}
2488 
2489 	return NULL;
2490 }
2491 
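/*
 * Emulate link detection for a forced connector: mark the link as having no
 * real connection, create a sink matching the connector signal type, and read
 * the local EDID into it.
 */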
2492 static void emulated_link_detect(struct dc_link *link)
2493 {
2494 	struct dc_sink_init_data sink_init_data = { 0 };
2495 	struct display_sink_capability sink_caps = { 0 };
2496 	enum dc_edid_status edid_status;
2497 	struct dc_context *dc_ctx = link->ctx;
2498 	struct dc_sink *sink = NULL;
2499 	struct dc_sink *prev_sink = NULL;
2500 
2501 	link->type = dc_connection_none;
2502 	prev_sink = link->local_sink;
2503 
2504 	if (prev_sink)
2505 		dc_sink_release(prev_sink);
2506 
2507 	switch (link->connector_signal) {
2508 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2509 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2510 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2511 		break;
2512 	}
2513 
2514 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2515 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2516 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2517 		break;
2518 	}
2519 
2520 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2521 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2522 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2523 		break;
2524 	}
2525 
2526 	case SIGNAL_TYPE_LVDS: {
2527 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2528 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2529 		break;
2530 	}
2531 
2532 	case SIGNAL_TYPE_EDP: {
2533 		sink_caps.transaction_type =
2534 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2535 		sink_caps.signal = SIGNAL_TYPE_EDP;
2536 		break;
2537 	}
2538 
2539 	case SIGNAL_TYPE_DISPLAY_PORT: {
2540 		sink_caps.transaction_type =
2541 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2542 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2543 		break;
2544 	}
2545 
2546 	default:
2547 		DC_ERROR("Invalid connector type! signal:%d\n",
2548 			link->connector_signal);
2549 		return;
2550 	}
2551 
2552 	sink_init_data.link = link;
2553 	sink_init_data.sink_signal = sink_caps.signal;
2554 
2555 	sink = dc_sink_create(&sink_init_data);
2556 	if (!sink) {
2557 		DC_ERROR("Failed to create sink!\n");
2558 		return;
2559 	}
2560 
2561 	/* dc_sink_create returns a new reference */
2562 	link->local_sink = sink;
2563 
2564 	edid_status = dm_helpers_read_local_edid(
2565 			link->ctx,
2566 			link,
2567 			sink);
2568 
2569 	if (edid_status != EDID_OK)
2570 		DC_ERROR("Failed to read EDID");
2571 
2572 }
2573 
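/*
 * After a GPU reset, resubmit every stream from the cached DC state with all
 * of its surfaces flagged for a full update.
 */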
2574 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2575 				     struct amdgpu_display_manager *dm)
2576 {
2577 	struct {
2578 		struct dc_surface_update surface_updates[MAX_SURFACES];
2579 		struct dc_plane_info plane_infos[MAX_SURFACES];
2580 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2581 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2582 		struct dc_stream_update stream_update;
2583 	} *bundle;
2584 	int k, m;
2585 
2586 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2587 
2588 	if (!bundle) {
2589 		dm_error("Failed to allocate update bundle\n");
2590 		goto cleanup;
2591 	}
2592 
2593 	for (k = 0; k < dc_state->stream_count; k++) {
2594 		bundle->stream_update.stream = dc_state->streams[k];
2595 
2596 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2597 			bundle->surface_updates[m].surface =
2598 				dc_state->stream_status->plane_states[m];
2599 			bundle->surface_updates[m].surface->force_full_update =
2600 				true;
2601 		}
2602 		dc_commit_updates_for_stream(
2603 			dm->dc, bundle->surface_updates,
2604 			dc_state->stream_status->plane_count,
2605 			dc_state->streams[k], &bundle->stream_update, dc_state);
2606 	}
2607 
2608 cleanup:
2609 	kfree(bundle);
2612 }
2613 
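/* Turn off DPMS for the stream currently driving the given link. */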
2614 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2615 {
2616 	struct dc_stream_state *stream_state;
2617 	struct amdgpu_dm_connector *aconnector = link->priv;
2618 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2619 	struct dc_stream_update stream_update;
2620 	bool dpms_off = true;
2621 
2622 	memset(&stream_update, 0, sizeof(stream_update));
2623 	stream_update.dpms_off = &dpms_off;
2624 
2625 	mutex_lock(&adev->dm.dc_lock);
2626 	stream_state = dc_stream_find_from_link(link);
2627 
2628 	if (stream_state == NULL) {
2629 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2630 		mutex_unlock(&adev->dm.dc_lock);
2631 		return;
2632 	}
2633 
2634 	stream_update.stream = stream_state;
2635 	acrtc_state->force_dpms_off = true;
2636 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2637 				     stream_state, &stream_update,
2638 				     stream_state->ctx->dc->current_state);
2639 	mutex_unlock(&adev->dm.dc_lock);
2640 }
2641 
2642 static int dm_resume(void *handle)
2643 {
2644 	struct amdgpu_device *adev = handle;
2645 	struct drm_device *ddev = adev_to_drm(adev);
2646 	struct amdgpu_display_manager *dm = &adev->dm;
2647 	struct amdgpu_dm_connector *aconnector;
2648 	struct drm_connector *connector;
2649 	struct drm_connector_list_iter iter;
2650 	struct drm_crtc *crtc;
2651 	struct drm_crtc_state *new_crtc_state;
2652 	struct dm_crtc_state *dm_new_crtc_state;
2653 	struct drm_plane *plane;
2654 	struct drm_plane_state *new_plane_state;
2655 	struct dm_plane_state *dm_new_plane_state;
2656 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2657 	enum dc_connection_type new_connection_type = dc_connection_none;
2658 	struct dc_state *dc_state;
2659 	int i, r, j;
2660 
2661 	if (amdgpu_in_reset(adev)) {
2662 		dc_state = dm->cached_dc_state;
2663 
2664 		/*
2665 		 * The dc->current_state is backed up into dm->cached_dc_state
2666 		 * before we commit 0 streams.
2667 		 *
2668 		 * DC will clear link encoder assignments on the real state
2669 		 * but the changes won't propagate over to the copy we made
2670 		 * before the 0 streams commit.
2671 		 *
2672 		 * DC expects that link encoder assignments are *not* valid
2673 		 * when committing a state, so as a workaround we can copy
2674 		 * off of the current state.
2675 		 *
2676 		 * We lose the previous assignments, but we had already
2677 		 * committed 0 streams anyway.
2678 		 */
2679 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2680 
2681 		if (dc_enable_dmub_notifications(adev->dm.dc))
2682 			amdgpu_dm_outbox_init(adev);
2683 
2684 		r = dm_dmub_hw_init(adev);
2685 		if (r)
2686 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2687 
2688 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2689 		dc_resume(dm->dc);
2690 
2691 		amdgpu_dm_irq_resume_early(adev);
2692 
2693 		for (i = 0; i < dc_state->stream_count; i++) {
2694 			dc_state->streams[i]->mode_changed = true;
2695 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2696 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2697 					= 0xffffffff;
2698 			}
2699 		}
2700 
2701 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2702 
2703 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2704 
2705 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2706 
2707 		dc_release_state(dm->cached_dc_state);
2708 		dm->cached_dc_state = NULL;
2709 
2710 		amdgpu_dm_irq_resume_late(adev);
2711 
2712 		mutex_unlock(&dm->dc_lock);
2713 
2714 		return 0;
2715 	}
2716 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2717 	dc_release_state(dm_state->context);
2718 	dm_state->context = dc_create_state(dm->dc);
2719 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2720 	dc_resource_state_construct(dm->dc, dm_state->context);
2721 
2722 	/* Re-enable outbox interrupts for DPIA. */
2723 	if (dc_enable_dmub_notifications(adev->dm.dc))
2724 		amdgpu_dm_outbox_init(adev);
2725 
2726 	/* Before powering on DC we need to re-initialize DMUB. */
2727 	dm_dmub_hw_resume(adev);
2728 
2729 	/* power on hardware */
2730 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2731 
2732 	/* program HPD filter */
2733 	dc_resume(dm->dc);
2734 
2735 	/*
2736 	 * early enable HPD Rx IRQ, should be done before set mode as short
2737 	 * pulse interrupts are used for MST
2738 	 */
2739 	amdgpu_dm_irq_resume_early(adev);
2740 
2741 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2742 	s3_handle_mst(ddev, false);
2743 
2744 	/* Do detection */
2745 	drm_connector_list_iter_begin(ddev, &iter);
2746 	drm_for_each_connector_iter(connector, &iter) {
2747 		aconnector = to_amdgpu_dm_connector(connector);
2748 
2749 		/*
2750 		 * This is the case when traversing through already created
2751 		 * MST connectors; they should be skipped.
2752 		 */
2753 		if (aconnector->dc_link &&
2754 		    aconnector->dc_link->type == dc_connection_mst_branch)
2755 			continue;
2756 
2757 		mutex_lock(&aconnector->hpd_lock);
2758 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2759 			DRM_ERROR("KMS: Failed to detect connector\n");
2760 
2761 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2762 			emulated_link_detect(aconnector->dc_link);
2763 		else
2764 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2765 
2766 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2767 			aconnector->fake_enable = false;
2768 
2769 		if (aconnector->dc_sink)
2770 			dc_sink_release(aconnector->dc_sink);
2771 		aconnector->dc_sink = NULL;
2772 		amdgpu_dm_update_connector_after_detect(aconnector);
2773 		mutex_unlock(&aconnector->hpd_lock);
2774 	}
2775 	drm_connector_list_iter_end(&iter);
2776 
2777 	/* Force mode set in atomic commit */
2778 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2779 		new_crtc_state->active_changed = true;
2780 
2781 	/*
2782 	 * atomic_check is expected to create the dc states. We need to release
2783 	 * them here, since they were duplicated as part of the suspend
2784 	 * procedure.
2785 	 */
2786 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2787 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2788 		if (dm_new_crtc_state->stream) {
2789 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2790 			dc_stream_release(dm_new_crtc_state->stream);
2791 			dm_new_crtc_state->stream = NULL;
2792 		}
2793 	}
2794 
2795 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2796 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2797 		if (dm_new_plane_state->dc_state) {
2798 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2799 			dc_plane_state_release(dm_new_plane_state->dc_state);
2800 			dm_new_plane_state->dc_state = NULL;
2801 		}
2802 	}
2803 
2804 	drm_atomic_helper_resume(ddev, dm->cached_state);
2805 
2806 	dm->cached_state = NULL;
2807 
2808 	amdgpu_dm_irq_resume_late(adev);
2809 
2810 	amdgpu_dm_smu_write_watermarks_table(adev);
2811 
2812 	return 0;
2813 }
2814 
2815 /**
2816  * DOC: DM Lifecycle
2817  *
2818  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2819  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2820  * the base driver's device list to be initialized and torn down accordingly.
2821  *
2822  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2823  */
2824 
2825 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2826 	.name = "dm",
2827 	.early_init = dm_early_init,
2828 	.late_init = dm_late_init,
2829 	.sw_init = dm_sw_init,
2830 	.sw_fini = dm_sw_fini,
2831 	.early_fini = amdgpu_dm_early_fini,
2832 	.hw_init = dm_hw_init,
2833 	.hw_fini = dm_hw_fini,
2834 	.suspend = dm_suspend,
2835 	.resume = dm_resume,
2836 	.is_idle = dm_is_idle,
2837 	.wait_for_idle = dm_wait_for_idle,
2838 	.check_soft_reset = dm_check_soft_reset,
2839 	.soft_reset = dm_soft_reset,
2840 	.set_clockgating_state = dm_set_clockgating_state,
2841 	.set_powergating_state = dm_set_powergating_state,
2842 };
2843 
2844 const struct amdgpu_ip_block_version dm_ip_block =
2845 {
2846 	.type = AMD_IP_BLOCK_TYPE_DCE,
2847 	.major = 1,
2848 	.minor = 0,
2849 	.rev = 0,
2850 	.funcs = &amdgpu_dm_funcs,
2851 };
2852 
2854 /**
2855  * DOC: atomic
2856  *
2857  * *WIP*
2858  */
2859 
2860 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2861 	.fb_create = amdgpu_display_user_framebuffer_create,
2862 	.get_format_info = amd_get_format_info,
2863 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2864 	.atomic_check = amdgpu_dm_atomic_check,
2865 	.atomic_commit = drm_atomic_helper_commit,
2866 };
2867 
2868 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2869 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2870 };
2871 
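/*
 * Derive eDP backlight capabilities (AUX control support plus the min/max
 * input signal range) from the sink's extended DPCD caps and HDR metadata.
 */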
2872 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2873 {
2874 	u32 max_avg, min_cll, max, min, q, r;
2875 	struct amdgpu_dm_backlight_caps *caps;
2876 	struct amdgpu_display_manager *dm;
2877 	struct drm_connector *conn_base;
2878 	struct amdgpu_device *adev;
2879 	struct dc_link *link = NULL;
2880 	static const u8 pre_computed_values[] = {
2881 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2882 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2883 	int i;
2884 
2885 	if (!aconnector || !aconnector->dc_link)
2886 		return;
2887 
2888 	link = aconnector->dc_link;
2889 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2890 		return;
2891 
2892 	conn_base = &aconnector->base;
2893 	adev = drm_to_adev(conn_base->dev);
2894 	dm = &adev->dm;
2895 	for (i = 0; i < dm->num_of_edps; i++) {
2896 		if (link == dm->backlight_link[i])
2897 			break;
2898 	}
2899 	if (i >= dm->num_of_edps)
2900 		return;
2901 	caps = &dm->backlight_caps[i];
2902 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2903 	caps->aux_support = false;
2904 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2905 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2906 
2907 	if (caps->ext_caps->bits.oled == 1 /*||
2908 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2909 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2910 		caps->aux_support = true;
2911 
2912 	if (amdgpu_backlight == 0)
2913 		caps->aux_support = false;
2914 	else if (amdgpu_backlight == 1)
2915 		caps->aux_support = true;
2916 
2917 	/* From the specification (CTA-861-G), for calculating the maximum
2918 	 * luminance we need to use:
2919 	 *	Luminance = 50*2**(CV/32)
2920 	 * where CV is a one-byte value.
2921 	 * Calculating this expression would need floating-point precision;
2922 	 * to avoid that complexity, we take advantage of the fact that CV is
2923 	 * divided by a constant. From Euclid's division algorithm, we know that
2924 	 * CV can be written as CV = 32*q + r. Substituting this into the
2925 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need to
2926 	 * pre-compute the values of 50*(2**(r/32)). For pre-computing them we
2927 	 * used the following Ruby line:
2928 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2929 	 * The results of the above expression can be verified against
2930 	 * pre_computed_values.
2931 	 */
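	/* Worked example: max_avg = 72 gives q = 72 >> 5 = 2 and r = 72 % 32 = 8,
	 * so max = (1 << 2) * pre_computed_values[8] = 4 * 59 = 236, close to the
	 * exact 50 * 2**(72/32) ~= 238.
	 */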
2932 	q = max_avg >> 5;
2933 	r = max_avg % 32;
2934 	max = (1 << q) * pre_computed_values[r];
2935 
2936 	// min luminance: maxLum * (CV/255)^2 / 100
2937 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2938 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2939 
2940 	caps->aux_max_input_signal = max;
2941 	caps->aux_min_input_signal = min;
2942 }
2943 
2944 void amdgpu_dm_update_connector_after_detect(
2945 		struct amdgpu_dm_connector *aconnector)
2946 {
2947 	struct drm_connector *connector = &aconnector->base;
2948 	struct drm_device *dev = connector->dev;
2949 	struct dc_sink *sink;
2950 
2951 	/* MST handled by drm_mst framework */
2952 	if (aconnector->mst_mgr.mst_state)
2953 		return;
2954 
2955 	sink = aconnector->dc_link->local_sink;
2956 	if (sink)
2957 		dc_sink_retain(sink);
2958 
2959 	/*
2960 	 * EDID mgmt connector gets its first update only in the mode_valid hook;
2961 	 * the connector sink is then set to either a fake or a physical sink,
2962 	 * depending on link status. Skip if already done during boot.
2963 	 */
2964 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2965 			&& aconnector->dc_em_sink) {
2966 
2967 		/*
2968 		 * For S3 resume with a headless setup, use the emulated sink to
2969 		 * fake a stream because connector->sink is set to NULL on resume.
2970 		 */
2971 		mutex_lock(&dev->mode_config.mutex);
2972 
2973 		if (sink) {
2974 			if (aconnector->dc_sink) {
2975 				amdgpu_dm_update_freesync_caps(connector, NULL);
2976 				/*
2977 				 * The retain and release below bump up the refcount for the
2978 				 * sink because the link doesn't point to it anymore after
2979 				 * disconnect. Otherwise, the next crtc-to-connector reshuffle
2980 				 * by the UMD would run into an unwanted dc_sink release.
2981 				 */
2982 				dc_sink_release(aconnector->dc_sink);
2983 			}
2984 			aconnector->dc_sink = sink;
2985 			dc_sink_retain(aconnector->dc_sink);
2986 			amdgpu_dm_update_freesync_caps(connector,
2987 					aconnector->edid);
2988 		} else {
2989 			amdgpu_dm_update_freesync_caps(connector, NULL);
2990 			if (!aconnector->dc_sink) {
2991 				aconnector->dc_sink = aconnector->dc_em_sink;
2992 				dc_sink_retain(aconnector->dc_sink);
2993 			}
2994 		}
2995 
2996 		mutex_unlock(&dev->mode_config.mutex);
2997 
2998 		if (sink)
2999 			dc_sink_release(sink);
3000 		return;
3001 	}
3002 
3003 	/*
3004 	 * TODO: temporary guard to look for proper fix
3005 	 * if this sink is MST sink, we should not do anything
3006 	 */
3007 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3008 		dc_sink_release(sink);
3009 		return;
3010 	}
3011 
3012 	if (aconnector->dc_sink == sink) {
3013 		/*
3014 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3015 		 * Do nothing!!
3016 		 */
3017 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3018 				aconnector->connector_id);
3019 		if (sink)
3020 			dc_sink_release(sink);
3021 		return;
3022 	}
3023 
3024 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3025 		aconnector->connector_id, aconnector->dc_sink, sink);
3026 
3027 	mutex_lock(&dev->mode_config.mutex);
3028 
3029 	/*
3030 	 * 1. Update status of the drm connector
3031 	 * 2. Send an event and let userspace tell us what to do
3032 	 */
3033 	if (sink) {
3034 		/*
3035 		 * TODO: check if we still need the S3 mode update workaround.
3036 		 * If yes, put it here.
3037 		 */
3038 		if (aconnector->dc_sink) {
3039 			amdgpu_dm_update_freesync_caps(connector, NULL);
3040 			dc_sink_release(aconnector->dc_sink);
3041 		}
3042 
3043 		aconnector->dc_sink = sink;
3044 		dc_sink_retain(aconnector->dc_sink);
3045 		if (sink->dc_edid.length == 0) {
3046 			aconnector->edid = NULL;
3047 			if (aconnector->dc_link->aux_mode) {
3048 				drm_dp_cec_unset_edid(
3049 					&aconnector->dm_dp_aux.aux);
3050 			}
3051 		} else {
3052 			aconnector->edid =
3053 				(struct edid *)sink->dc_edid.raw_edid;
3054 
3055 			if (aconnector->dc_link->aux_mode)
3056 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3057 						    aconnector->edid);
3058 		}
3059 
3060 		drm_connector_update_edid_property(connector, aconnector->edid);
3061 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3062 		update_connector_ext_caps(aconnector);
3063 	} else {
3064 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3065 		amdgpu_dm_update_freesync_caps(connector, NULL);
3066 		drm_connector_update_edid_property(connector, NULL);
3067 		aconnector->num_modes = 0;
3068 		dc_sink_release(aconnector->dc_sink);
3069 		aconnector->dc_sink = NULL;
3070 		aconnector->edid = NULL;
3071 #ifdef CONFIG_DRM_AMD_DC_HDCP
3072 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3073 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3074 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3075 #endif
3076 	}
3077 
3078 	mutex_unlock(&dev->mode_config.mutex);
3079 
3080 	update_subconnector_property(aconnector);
3081 
3082 	if (sink)
3083 		dc_sink_release(sink);
3084 }
3085 
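/*
 * Common HPD (long pulse) handling: redo sink detection on the link, update
 * the connector, restore its DRM state and send a hotplug event to userspace.
 */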
3086 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3087 {
3088 	struct drm_connector *connector = &aconnector->base;
3089 	struct drm_device *dev = connector->dev;
3090 	enum dc_connection_type new_connection_type = dc_connection_none;
3091 	struct amdgpu_device *adev = drm_to_adev(dev);
3092 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3093 	struct dm_crtc_state *dm_crtc_state = NULL;
3094 
3095 	if (adev->dm.disable_hpd_irq)
3096 		return;
3097 
3098 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3099 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3100 					dm_con_state->base.state,
3101 					dm_con_state->base.crtc));
3102 	/*
3103 	 * In case of failure or for MST there is no need to update the connector
3104 	 * status or notify the OS, since MST does this in its own context.
3105 	 */
3106 	mutex_lock(&aconnector->hpd_lock);
3107 
3108 #ifdef CONFIG_DRM_AMD_DC_HDCP
3109 	if (adev->dm.hdcp_workqueue) {
3110 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3111 		dm_con_state->update_hdcp = true;
3112 	}
3113 #endif
3114 	if (aconnector->fake_enable)
3115 		aconnector->fake_enable = false;
3116 
3117 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3118 		DRM_ERROR("KMS: Failed to detect connector\n");
3119 
3120 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3121 		emulated_link_detect(aconnector->dc_link);
3122 
3123 		drm_modeset_lock_all(dev);
3124 		dm_restore_drm_connector_state(dev, connector);
3125 		drm_modeset_unlock_all(dev);
3126 
3127 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3128 			drm_kms_helper_connector_hotplug_event(connector);
3129 
3130 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3131 		if (new_connection_type == dc_connection_none &&
3132 		    aconnector->dc_link->type == dc_connection_none &&
3133 		    dm_crtc_state)
3134 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3135 
3136 		amdgpu_dm_update_connector_after_detect(aconnector);
3137 
3138 		drm_modeset_lock_all(dev);
3139 		dm_restore_drm_connector_state(dev, connector);
3140 		drm_modeset_unlock_all(dev);
3141 
3142 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3143 			drm_kms_helper_connector_hotplug_event(connector);
3144 	}
3145 	mutex_unlock(&aconnector->hpd_lock);
3146 
3147 }
3148 
3149 static void handle_hpd_irq(void *param)
3150 {
3151 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3152 
3153 	handle_hpd_irq_helper(aconnector);
3154 
3155 }
3156 
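/*
 * Drain MST sideband messages: poll the sink's ESI/device service IRQ DPCD
 * bytes, hand them to the MST manager and ACK each serviced IRQ back to the
 * sink, up to max_process_count iterations.
 */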
3157 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3158 {
3159 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3160 	uint8_t dret;
3161 	bool new_irq_handled = false;
3162 	int dpcd_addr;
3163 	int dpcd_bytes_to_read;
3164 
3165 	const int max_process_count = 30;
3166 	int process_count = 0;
3167 
3168 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3169 
3170 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3171 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3172 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3173 		dpcd_addr = DP_SINK_COUNT;
3174 	} else {
3175 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3176 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3177 		dpcd_addr = DP_SINK_COUNT_ESI;
3178 	}
3179 
3180 	dret = drm_dp_dpcd_read(
3181 		&aconnector->dm_dp_aux.aux,
3182 		dpcd_addr,
3183 		esi,
3184 		dpcd_bytes_to_read);
3185 
3186 	while (dret == dpcd_bytes_to_read &&
3187 		process_count < max_process_count) {
3188 		uint8_t retry;
3189 		dret = 0;
3190 
3191 		process_count++;
3192 
3193 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3194 		/* handle HPD short pulse irq */
3195 		if (aconnector->mst_mgr.mst_state)
3196 			drm_dp_mst_hpd_irq(
3197 				&aconnector->mst_mgr,
3198 				esi,
3199 				&new_irq_handled);
3200 
3201 		if (new_irq_handled) {
3202 			/* ACK at DPCD to notify downstream */
3203 			const int ack_dpcd_bytes_to_write =
3204 				dpcd_bytes_to_read - 1;
3205 
3206 			for (retry = 0; retry < 3; retry++) {
3207 				uint8_t wret;
3208 
3209 				wret = drm_dp_dpcd_write(
3210 					&aconnector->dm_dp_aux.aux,
3211 					dpcd_addr + 1,
3212 					&esi[1],
3213 					ack_dpcd_bytes_to_write);
3214 				if (wret == ack_dpcd_bytes_to_write)
3215 					break;
3216 			}
3217 
3218 			/* check if there is new irq to be handled */
3219 			dret = drm_dp_dpcd_read(
3220 				&aconnector->dm_dp_aux.aux,
3221 				dpcd_addr,
3222 				esi,
3223 				dpcd_bytes_to_read);
3224 
3225 			new_irq_handled = false;
3226 		} else {
3227 			break;
3228 		}
3229 	}
3230 
3231 	if (process_count == max_process_count)
3232 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3233 }
3234 
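/*
 * Queue HPD RX servicing (automated test handling, link-loss recovery) onto
 * the per-link offload workqueue so it runs outside the interrupt path.
 */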
3235 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3236 							union hpd_irq_data hpd_irq_data)
3237 {
3238 	struct hpd_rx_irq_offload_work *offload_work =
3239 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3240 
3241 	if (!offload_work) {
3242 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3243 		return;
3244 	}
3245 
3246 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3247 	offload_work->data = hpd_irq_data;
3248 	offload_work->offload_wq = offload_wq;
3249 
3250 	queue_work(offload_wq->wq, &offload_work->work);
3251 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3252 }
3253 
3254 static void handle_hpd_rx_irq(void *param)
3255 {
3256 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3257 	struct drm_connector *connector = &aconnector->base;
3258 	struct drm_device *dev = connector->dev;
3259 	struct dc_link *dc_link = aconnector->dc_link;
3260 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3261 	bool result = false;
3262 	enum dc_connection_type new_connection_type = dc_connection_none;
3263 	struct amdgpu_device *adev = drm_to_adev(dev);
3264 	union hpd_irq_data hpd_irq_data;
3265 	bool link_loss = false;
3266 	bool has_left_work = false;
3267 	int idx = aconnector->base.index;
3268 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3269 
3270 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3271 
3272 	if (adev->dm.disable_hpd_irq)
3273 		return;
3274 
3275 	/*
3276 	 * TODO: This mutex temporarily protects the HPD interrupt from GPIO
3277 	 * conflicts; once an i2c helper is implemented, this mutex should be
3278 	 * retired.
3279 	 */
3280 	mutex_lock(&aconnector->hpd_lock);
3281 
3282 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3283 						&link_loss, true, &has_left_work);
3284 
3285 	if (!has_left_work)
3286 		goto out;
3287 
3288 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3289 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3290 		goto out;
3291 	}
3292 
3293 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3294 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3295 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3296 			dm_handle_mst_sideband_msg(aconnector);
3297 			goto out;
3298 		}
3299 
3300 		if (link_loss) {
3301 			bool skip = false;
3302 
3303 			spin_lock(&offload_wq->offload_lock);
3304 			skip = offload_wq->is_handling_link_loss;
3305 
3306 			if (!skip)
3307 				offload_wq->is_handling_link_loss = true;
3308 
3309 			spin_unlock(&offload_wq->offload_lock);
3310 
3311 			if (!skip)
3312 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3313 
3314 			goto out;
3315 		}
3316 	}
3317 
3318 out:
3319 	if (result && !is_mst_root_connector) {
3320 		/* Downstream Port status changed. */
3321 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3322 			DRM_ERROR("KMS: Failed to detect connector\n");
3323 
3324 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3325 			emulated_link_detect(dc_link);
3326 
3327 			if (aconnector->fake_enable)
3328 				aconnector->fake_enable = false;
3329 
3330 			amdgpu_dm_update_connector_after_detect(aconnector);
3331 
3333 			drm_modeset_lock_all(dev);
3334 			dm_restore_drm_connector_state(dev, connector);
3335 			drm_modeset_unlock_all(dev);
3336 
3337 			drm_kms_helper_connector_hotplug_event(connector);
3338 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3339 
3340 			if (aconnector->fake_enable)
3341 				aconnector->fake_enable = false;
3342 
3343 			amdgpu_dm_update_connector_after_detect(aconnector);
3344 
3346 			drm_modeset_lock_all(dev);
3347 			dm_restore_drm_connector_state(dev, connector);
3348 			drm_modeset_unlock_all(dev);
3349 
3350 			drm_kms_helper_connector_hotplug_event(connector);
3351 		}
3352 	}
3353 #ifdef CONFIG_DRM_AMD_DC_HDCP
3354 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3355 		if (adev->dm.hdcp_workqueue)
3356 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3357 	}
3358 #endif
3359 
3360 	if (dc_link->type != dc_connection_mst_branch)
3361 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3362 
3363 	mutex_unlock(&aconnector->hpd_lock);
3364 }
3365 
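/* Register HPD and HPD RX (short pulse) interrupt handlers for every connector. */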
3366 static void register_hpd_handlers(struct amdgpu_device *adev)
3367 {
3368 	struct drm_device *dev = adev_to_drm(adev);
3369 	struct drm_connector *connector;
3370 	struct amdgpu_dm_connector *aconnector;
3371 	const struct dc_link *dc_link;
3372 	struct dc_interrupt_params int_params = {0};
3373 
3374 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3375 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3376 
3377 	list_for_each_entry(connector,
3378 			&dev->mode_config.connector_list, head)	{
3379 
3380 		aconnector = to_amdgpu_dm_connector(connector);
3381 		dc_link = aconnector->dc_link;
3382 
3383 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3384 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3385 			int_params.irq_source = dc_link->irq_source_hpd;
3386 
3387 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3388 					handle_hpd_irq,
3389 					(void *) aconnector);
3390 		}
3391 
3392 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3393 
3394 			/* Also register for DP short pulse (hpd_rx). */
3395 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3396 			int_params.irq_source = dc_link->irq_source_hpd_rx;
3397 
3398 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3399 					handle_hpd_rx_irq,
3400 					(void *) aconnector);
3401 
3402 			if (adev->dm.hpd_rx_offload_wq)
3403 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3404 					aconnector;
3405 		}
3406 	}
3407 }
3408 
3409 #if defined(CONFIG_DRM_AMD_DC_SI)
3410 /* Register IRQ sources and initialize IRQ callbacks */
3411 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3412 {
3413 	struct dc *dc = adev->dm.dc;
3414 	struct common_irq_params *c_irq_params;
3415 	struct dc_interrupt_params int_params = {0};
3416 	int r;
3417 	int i;
3418 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3419 
3420 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3421 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3422 
3423 	/*
3424 	 * Actions of amdgpu_irq_add_id():
3425 	 * 1. Register a set() function with base driver.
3426 	 *    Base driver will call set() function to enable/disable an
3427 	 *    interrupt in DC hardware.
3428 	 * 2. Register amdgpu_dm_irq_handler().
3429 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3430 	 *    coming from DC hardware.
3431 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3432 	 *    for acknowledging and handling. */
3433 
3434 	/* Use VBLANK interrupt */
3435 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3436 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3437 		if (r) {
3438 			DRM_ERROR("Failed to add crtc irq id!\n");
3439 			return r;
3440 		}
3441 
3442 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3443 		int_params.irq_source =
3444 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3445 
3446 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3447 
3448 		c_irq_params->adev = adev;
3449 		c_irq_params->irq_src = int_params.irq_source;
3450 
3451 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3452 				dm_crtc_high_irq, c_irq_params);
3453 	}
3454 
3455 	/* Use GRPH_PFLIP interrupt */
3456 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3457 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3458 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3459 		if (r) {
3460 			DRM_ERROR("Failed to add page flip irq id!\n");
3461 			return r;
3462 		}
3463 
3464 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3465 		int_params.irq_source =
3466 			dc_interrupt_to_irq_source(dc, i, 0);
3467 
3468 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3469 
3470 		c_irq_params->adev = adev;
3471 		c_irq_params->irq_src = int_params.irq_source;
3472 
3473 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3474 				dm_pflip_high_irq, c_irq_params);
3475 
3476 	}
3477 
3478 	/* HPD */
3479 	r = amdgpu_irq_add_id(adev, client_id,
3480 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3481 	if (r) {
3482 		DRM_ERROR("Failed to add hpd irq id!\n");
3483 		return r;
3484 	}
3485 
3486 	register_hpd_handlers(adev);
3487 
3488 	return 0;
3489 }
3490 #endif
3491 
3492 /* Register IRQ sources and initialize IRQ callbacks */
3493 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3494 {
3495 	struct dc *dc = adev->dm.dc;
3496 	struct common_irq_params *c_irq_params;
3497 	struct dc_interrupt_params int_params = {0};
3498 	int r;
3499 	int i;
3500 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3501 
3502 	if (adev->family >= AMDGPU_FAMILY_AI)
3503 		client_id = SOC15_IH_CLIENTID_DCE;
3504 
3505 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3506 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3507 
3508 	/*
3509 	 * Actions of amdgpu_irq_add_id():
3510 	 * 1. Register a set() function with base driver.
3511 	 *    Base driver will call set() function to enable/disable an
3512 	 *    interrupt in DC hardware.
3513 	 * 2. Register amdgpu_dm_irq_handler().
3514 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3515 	 *    coming from DC hardware.
3516 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3517 	 *    for acknowledging and handling. */
3518 
3519 	/* Use VBLANK interrupt */
3520 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3521 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3522 		if (r) {
3523 			DRM_ERROR("Failed to add crtc irq id!\n");
3524 			return r;
3525 		}
3526 
3527 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3528 		int_params.irq_source =
3529 			dc_interrupt_to_irq_source(dc, i, 0);
3530 
3531 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3532 
3533 		c_irq_params->adev = adev;
3534 		c_irq_params->irq_src = int_params.irq_source;
3535 
3536 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3537 				dm_crtc_high_irq, c_irq_params);
3538 	}
3539 
3540 	/* Use VUPDATE interrupt */
3541 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3542 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3543 		if (r) {
3544 			DRM_ERROR("Failed to add vupdate irq id!\n");
3545 			return r;
3546 		}
3547 
3548 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3549 		int_params.irq_source =
3550 			dc_interrupt_to_irq_source(dc, i, 0);
3551 
3552 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3553 
3554 		c_irq_params->adev = adev;
3555 		c_irq_params->irq_src = int_params.irq_source;
3556 
3557 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3558 				dm_vupdate_high_irq, c_irq_params);
3559 	}
3560 
3561 	/* Use GRPH_PFLIP interrupt */
3562 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3563 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3564 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3565 		if (r) {
3566 			DRM_ERROR("Failed to add page flip irq id!\n");
3567 			return r;
3568 		}
3569 
3570 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3571 		int_params.irq_source =
3572 			dc_interrupt_to_irq_source(dc, i, 0);
3573 
3574 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3575 
3576 		c_irq_params->adev = adev;
3577 		c_irq_params->irq_src = int_params.irq_source;
3578 
3579 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3580 				dm_pflip_high_irq, c_irq_params);
3581 
3582 	}
3583 
3584 	/* HPD */
3585 	r = amdgpu_irq_add_id(adev, client_id,
3586 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3587 	if (r) {
3588 		DRM_ERROR("Failed to add hpd irq id!\n");
3589 		return r;
3590 	}
3591 
3592 	register_hpd_handlers(adev);
3593 
3594 	return 0;
3595 }
3596 
3597 /* Register IRQ sources and initialize IRQ callbacks */
3598 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3599 {
3600 	struct dc *dc = adev->dm.dc;
3601 	struct common_irq_params *c_irq_params;
3602 	struct dc_interrupt_params int_params = {0};
3603 	int r;
3604 	int i;
3605 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3606 	static const unsigned int vrtl_int_srcid[] = {
3607 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3608 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3609 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3610 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3611 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3612 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3613 	};
3614 #endif
3615 
3616 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3617 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3618 
3619 	/*
3620 	 * Actions of amdgpu_irq_add_id():
3621 	 * 1. Register a set() function with base driver.
3622 	 *    Base driver will call set() function to enable/disable an
3623 	 *    interrupt in DC hardware.
3624 	 * 2. Register amdgpu_dm_irq_handler().
3625 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3626 	 *    coming from DC hardware.
3627 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3628 	 *    for acknowledging and handling.
3629 	 */
3630 
3631 	/* Use VSTARTUP interrupt */
3632 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3633 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3634 			i++) {
3635 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3636 
3637 		if (r) {
3638 			DRM_ERROR("Failed to add crtc irq id!\n");
3639 			return r;
3640 		}
3641 
3642 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3643 		int_params.irq_source =
3644 			dc_interrupt_to_irq_source(dc, i, 0);
3645 
3646 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3647 
3648 		c_irq_params->adev = adev;
3649 		c_irq_params->irq_src = int_params.irq_source;
3650 
3651 		amdgpu_dm_irq_register_interrupt(
3652 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3653 	}
3654 
3655 	/* Use otg vertical line interrupt */
3656 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3657 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3658 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3659 				vrtl_int_srcid[i], &adev->vline0_irq);
3660 
3661 		if (r) {
3662 			DRM_ERROR("Failed to add vline0 irq id!\n");
3663 			return r;
3664 		}
3665 
3666 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3667 		int_params.irq_source =
3668 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3669 
3670 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3671 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3672 			break;
3673 		}
3674 
3675 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3676 					- DC_IRQ_SOURCE_DC1_VLINE0];
3677 
3678 		c_irq_params->adev = adev;
3679 		c_irq_params->irq_src = int_params.irq_source;
3680 
3681 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3682 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3683 	}
3684 #endif
3685 
3686 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3687 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3688 	 * to trigger at end of each vblank, regardless of state of the lock,
3689 	 * matching DCE behaviour.
3690 	 */
3691 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3692 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3693 	     i++) {
3694 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3695 
3696 		if (r) {
3697 			DRM_ERROR("Failed to add vupdate irq id!\n");
3698 			return r;
3699 		}
3700 
3701 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3702 		int_params.irq_source =
3703 			dc_interrupt_to_irq_source(dc, i, 0);
3704 
3705 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3706 
3707 		c_irq_params->adev = adev;
3708 		c_irq_params->irq_src = int_params.irq_source;
3709 
3710 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3711 				dm_vupdate_high_irq, c_irq_params);
3712 	}
3713 
3714 	/* Use GRPH_PFLIP interrupt */
3715 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3716 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3717 			i++) {
3718 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3719 		if (r) {
3720 			DRM_ERROR("Failed to add page flip irq id!\n");
3721 			return r;
3722 		}
3723 
3724 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3725 		int_params.irq_source =
3726 			dc_interrupt_to_irq_source(dc, i, 0);
3727 
3728 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3729 
3730 		c_irq_params->adev = adev;
3731 		c_irq_params->irq_src = int_params.irq_source;
3732 
3733 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3734 				dm_pflip_high_irq, c_irq_params);
3735 
3736 	}
3737 
3738 	/* HPD */
3739 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3740 			&adev->hpd_irq);
3741 	if (r) {
3742 		DRM_ERROR("Failed to add hpd irq id!\n");
3743 		return r;
3744 	}
3745 
3746 	register_hpd_handlers(adev);
3747 
3748 	return 0;
3749 }
3750 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3751 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3752 {
3753 	struct dc *dc = adev->dm.dc;
3754 	struct common_irq_params *c_irq_params;
3755 	struct dc_interrupt_params int_params = {0};
3756 	int r, i;
3757 
3758 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3759 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3760 
3761 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3762 			&adev->dmub_outbox_irq);
3763 	if (r) {
3764 		DRM_ERROR("Failed to add outbox irq id!\n");
3765 		return r;
3766 	}
3767 
3768 	if (dc->ctx->dmub_srv) {
3769 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3770 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3771 		int_params.irq_source =
3772 		dc_interrupt_to_irq_source(dc, i, 0);
3773 
3774 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3775 
3776 		c_irq_params->adev = adev;
3777 		c_irq_params->irq_src = int_params.irq_source;
3778 
3779 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3780 				dm_dmub_outbox1_low_irq, c_irq_params);
3781 	}
3782 
3783 	return 0;
3784 }
3785 
3786 /*
3787  * Acquires the lock for the atomic state object and returns
3788  * the new atomic state.
3789  *
3790  * This should only be called during atomic check.
3791  */
3792 int dm_atomic_get_state(struct drm_atomic_state *state,
3793 			struct dm_atomic_state **dm_state)
3794 {
3795 	struct drm_device *dev = state->dev;
3796 	struct amdgpu_device *adev = drm_to_adev(dev);
3797 	struct amdgpu_display_manager *dm = &adev->dm;
3798 	struct drm_private_state *priv_state;
3799 
3800 	if (*dm_state)
3801 		return 0;
3802 
3803 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3804 	if (IS_ERR(priv_state))
3805 		return PTR_ERR(priv_state);
3806 
3807 	*dm_state = to_dm_atomic_state(priv_state);
3808 
3809 	return 0;
3810 }
3811 
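/*
 * Return the new DM private state tracked in the given atomic state, or
 * NULL if the DM private object is not part of this commit.
 */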
3812 static struct dm_atomic_state *
3813 dm_atomic_get_new_state(struct drm_atomic_state *state)
3814 {
3815 	struct drm_device *dev = state->dev;
3816 	struct amdgpu_device *adev = drm_to_adev(dev);
3817 	struct amdgpu_display_manager *dm = &adev->dm;
3818 	struct drm_private_obj *obj;
3819 	struct drm_private_state *new_obj_state;
3820 	int i;
3821 
3822 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3823 		if (obj->funcs == dm->atomic_obj.funcs)
3824 			return to_dm_atomic_state(new_obj_state);
3825 	}
3826 
3827 	return NULL;
3828 }
3829 
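/*
 * Duplicate the DM private state, taking a copy of the current DC state
 * for the new atomic commit to build upon.
 */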
3830 static struct drm_private_state *
3831 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3832 {
3833 	struct dm_atomic_state *old_state, *new_state;
3834 
3835 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3836 	if (!new_state)
3837 		return NULL;
3838 
3839 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3840 
3841 	old_state = to_dm_atomic_state(obj->state);
3842 
3843 	if (old_state && old_state->context)
3844 		new_state->context = dc_copy_state(old_state->context);
3845 
3846 	if (!new_state->context) {
3847 		kfree(new_state);
3848 		return NULL;
3849 	}
3850 
3851 	return &new_state->base;
3852 }
3853 
3854 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3855 				    struct drm_private_state *state)
3856 {
3857 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3858 
3859 	if (dm_state && dm_state->context)
3860 		dc_release_state(dm_state->context);
3861 
3862 	kfree(dm_state);
3863 }
3864 
3865 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3866 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3867 	.atomic_destroy_state = dm_atomic_destroy_state,
3868 };
3869 
3870 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
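/*
 * Initialize the DRM mode_config parameters, create the initial DC state
 * and register it as the DM private atomic object, then create the modeset
 * properties and the audio component.
 */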
3871 {
3872 	struct dm_atomic_state *state;
3873 	int r;
3874 
3875 	adev->mode_info.mode_config_initialized = true;
3876 
3877 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3878 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3879 
3880 	adev_to_drm(adev)->mode_config.max_width = 16384;
3881 	adev_to_drm(adev)->mode_config.max_height = 16384;
3882 
3883 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3884 	/* disable prefer shadow for now due to hibernation issues */
3885 	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3886 	/* indicates support for immediate flip */
3887 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3888 
3889 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3890 
3891 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3892 	if (!state)
3893 		return -ENOMEM;
3894 
3895 	state->context = dc_create_state(adev->dm.dc);
3896 	if (!state->context) {
3897 		kfree(state);
3898 		return -ENOMEM;
3899 	}
3900 
3901 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3902 
3903 	drm_atomic_private_obj_init(adev_to_drm(adev),
3904 				    &adev->dm.atomic_obj,
3905 				    &state->base,
3906 				    &dm_atomic_state_funcs);
3907 
3908 	r = amdgpu_display_modeset_create_props(adev);
3909 	if (r) {
3910 		dc_release_state(state->context);
3911 		kfree(state);
3912 		return r;
3913 	}
3914 
3915 	r = amdgpu_dm_audio_init(adev);
3916 	if (r) {
3917 		dc_release_state(state->context);
3918 		kfree(state);
3919 		return r;
3920 	}
3921 
3922 	return 0;
3923 }
3924 
3925 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3926 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3927 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3928 
3929 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3930 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3931 
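/*
 * Cache the backlight capabilities for the given eDP, using the
 * ACPI-reported input signal range when available and falling back to the
 * DM defaults otherwise.
 */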
3932 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3933 					    int bl_idx)
3934 {
3935 #if defined(CONFIG_ACPI)
3936 	struct amdgpu_dm_backlight_caps caps;
3937 
3938 	memset(&caps, 0, sizeof(caps));
3939 
3940 	if (dm->backlight_caps[bl_idx].caps_valid)
3941 		return;
3942 
3943 	amdgpu_acpi_get_backlight_caps(&caps);
3944 	if (caps.caps_valid) {
3945 		dm->backlight_caps[bl_idx].caps_valid = true;
3946 		if (caps.aux_support)
3947 			return;
3948 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3949 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3950 	} else {
3951 		dm->backlight_caps[bl_idx].min_input_signal =
3952 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3953 		dm->backlight_caps[bl_idx].max_input_signal =
3954 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3955 	}
3956 #else
3957 	if (dm->backlight_caps[bl_idx].aux_support)
3958 		return;
3959 
3960 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3961 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3962 #endif
3963 }
3964 
3965 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3966 				unsigned *min, unsigned *max)
3967 {
3968 	if (!caps)
3969 		return 0;
3970 
3971 	if (caps->aux_support) {
3972 		// Firmware limits are in nits, DC API wants millinits.
3973 		*max = 1000 * caps->aux_max_input_signal;
3974 		*min = 1000 * caps->aux_min_input_signal;
3975 	} else {
3976 		// Firmware limits are 8-bit, PWM control is 16-bit.
3977 		*max = 0x101 * caps->max_input_signal;
3978 		*min = 0x101 * caps->min_input_signal;
3979 	}
3980 	return 1;
3981 }
3982 
3983 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3984 					uint32_t brightness)
3985 {
3986 	unsigned min, max;
3987 
3988 	if (!get_brightness_range(caps, &min, &max))
3989 		return brightness;
3990 
3991 	// Rescale 0..255 to min..max
3992 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3993 				       AMDGPU_MAX_BL_LEVEL);
3994 }
3995 
3996 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3997 				      uint32_t brightness)
3998 {
3999 	unsigned min, max;
4000 
4001 	if (!get_brightness_range(caps, &min, &max))
4002 		return brightness;
4003 
4004 	if (brightness < min)
4005 		return 0;
4006 	// Rescale min..max to 0..255
4007 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4008 				 max - min);
4009 }
4010 
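/*
 * Apply a user-space brightness value to the given eDP link, using the AUX
 * (nits-based) interface when the panel supports it and PWM otherwise.
 */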
4011 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4012 					 int bl_idx,
4013 					 u32 user_brightness)
4014 {
4015 	struct amdgpu_dm_backlight_caps caps;
4016 	struct dc_link *link;
4017 	u32 brightness;
4018 	bool rc;
4019 
4020 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4021 	caps = dm->backlight_caps[bl_idx];
4022 
4023 	dm->brightness[bl_idx] = user_brightness;
4024 	/* update scratch register */
4025 	if (bl_idx == 0)
4026 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4027 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4028 	link = (struct dc_link *)dm->backlight_link[bl_idx];
4029 
4030 	/* Change brightness based on AUX property */
4031 	if (caps.aux_support) {
4032 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
4033 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4034 		if (!rc)
4035 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4036 	} else {
4037 		rc = dc_link_set_backlight_level(link, brightness, 0);
4038 		if (!rc)
4039 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4040 	}
4041 
4042 	if (rc)
4043 		dm->actual_brightness[bl_idx] = user_brightness;
4044 }
4045 
4046 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4047 {
4048 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4049 	int i;
4050 
4051 	for (i = 0; i < dm->num_of_edps; i++) {
4052 		if (bd == dm->backlight_dev[i])
4053 			break;
4054 	}
4055 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4056 		i = 0;
4057 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4058 
4059 	return 0;
4060 }
4061 
4062 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4063 					 int bl_idx)
4064 {
4065 	struct amdgpu_dm_backlight_caps caps;
4066 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4067 
4068 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4069 	caps = dm->backlight_caps[bl_idx];
4070 
4071 	if (caps.aux_support) {
4072 		u32 avg, peak;
4073 		bool rc;
4074 
4075 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4076 		if (!rc)
4077 			return dm->brightness[bl_idx];
4078 		return convert_brightness_to_user(&caps, avg);
4079 	} else {
4080 		int ret = dc_link_get_backlight_level(link);
4081 
4082 		if (ret == DC_ERROR_UNEXPECTED)
4083 			return dm->brightness[bl_idx];
4084 		return convert_brightness_to_user(&caps, ret);
4085 	}
4086 }
4087 
4088 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4089 {
4090 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4091 	int i;
4092 
4093 	for (i = 0; i < dm->num_of_edps; i++) {
4094 		if (bd == dm->backlight_dev[i])
4095 			break;
4096 	}
4097 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4098 		i = 0;
4099 	return amdgpu_dm_backlight_get_level(dm, i);
4100 }
4101 
4102 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4103 	.options = BL_CORE_SUSPENDRESUME,
4104 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4105 	.update_status	= amdgpu_dm_backlight_update_status,
4106 };
4107 
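/* Register a backlight class device for the next eDP link. */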
4108 static void
4109 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4110 {
4111 	char bl_name[16];
4112 	struct backlight_properties props = { 0 };
4113 
4114 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4115 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4116 
4117 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4118 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4119 	props.type = BACKLIGHT_RAW;
4120 
4121 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4122 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4123 
4124 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4125 								       adev_to_drm(dm->adev)->dev,
4126 								       dm,
4127 								       &amdgpu_dm_backlight_ops,
4128 								       &props);
4129 
4130 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4131 		DRM_ERROR("DM: Backlight registration failed!\n");
4132 	else
4133 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4134 }
4135 #endif
4136 
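/*
 * Allocate and initialize a DRM plane of the given type. Primary planes are
 * tied to their matching CRTC; overlay planes may be used with any CRTC.
 */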
4137 static int initialize_plane(struct amdgpu_display_manager *dm,
4138 			    struct amdgpu_mode_info *mode_info, int plane_id,
4139 			    enum drm_plane_type plane_type,
4140 			    const struct dc_plane_cap *plane_cap)
4141 {
4142 	struct drm_plane *plane;
4143 	unsigned long possible_crtcs;
4144 	int ret = 0;
4145 
4146 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4147 	if (!plane) {
4148 		DRM_ERROR("KMS: Failed to allocate plane\n");
4149 		return -ENOMEM;
4150 	}
4151 	plane->type = plane_type;
4152 
4153 	/*
4154 	 * HACK: IGT tests expect that the primary plane for a CRTC
4155 	 * can only have one possible CRTC. Only expose support for
4156 	 * any CRTC if they're not going to be used as a primary plane
4157 	 * for a CRTC - like overlay or underlay planes.
4158 	 */
4159 	possible_crtcs = 1 << plane_id;
4160 	if (plane_id >= dm->dc->caps.max_streams)
4161 		possible_crtcs = 0xff;
4162 
4163 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4164 
4165 	if (ret) {
4166 		DRM_ERROR("KMS: Failed to initialize plane\n");
4167 		kfree(plane);
4168 		return ret;
4169 	}
4170 
4171 	if (mode_info)
4172 		mode_info->planes[plane_id] = plane;
4173 
4174 	return ret;
4175 }
4176 
4177 
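/*
 * If the link drives an eDP or LVDS panel, make sure a backlight device is
 * registered and record which link it controls.
 */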
4178 static void register_backlight_device(struct amdgpu_display_manager *dm,
4179 				      struct dc_link *link)
4180 {
4181 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4182 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4183 
4184 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4185 	    link->type != dc_connection_none) {
4186 		/*
4187 		 * Even if registration fails, we should continue with
4188 		 * DM initialization because not having backlight control
4189 		 * is better than a black screen.
4190 		 */
4191 		if (!dm->backlight_dev[dm->num_of_edps])
4192 			amdgpu_dm_register_backlight_device(dm);
4193 
4194 		if (dm->backlight_dev[dm->num_of_edps]) {
4195 			dm->backlight_link[dm->num_of_edps] = link;
4196 			dm->num_of_edps++;
4197 		}
4198 	}
4199 #endif
4200 }
4201 
4202 
4203 /*
4204  * In this architecture, the association
4205  * connector -> encoder -> crtc
4206  * is not really required. The crtc and connector will hold the
4207  * display_index as an abstraction to use with the DAL component.
4208  *
4209  * Returns 0 on success
4210  */
4211 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4212 {
4213 	struct amdgpu_display_manager *dm = &adev->dm;
4214 	int32_t i;
4215 	struct amdgpu_dm_connector *aconnector = NULL;
4216 	struct amdgpu_encoder *aencoder = NULL;
4217 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4218 	uint32_t link_cnt;
4219 	int32_t primary_planes;
4220 	enum dc_connection_type new_connection_type = dc_connection_none;
4221 	const struct dc_plane_cap *plane;
4222 	bool psr_feature_enabled = false;
4223 
4224 	dm->display_indexes_num = dm->dc->caps.max_streams;
4225 	/* Update the actual number of CRTCs used */
4226 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4227 
4228 	link_cnt = dm->dc->caps.max_links;
4229 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4230 		DRM_ERROR("DM: Failed to initialize mode config\n");
4231 		return -EINVAL;
4232 	}
4233 
4234 	/* There is one primary plane per CRTC */
4235 	primary_planes = dm->dc->caps.max_streams;
4236 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4237 
4238 	/*
4239 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4240 	 * Order is reversed to match iteration order in atomic check.
4241 	 */
4242 	for (i = (primary_planes - 1); i >= 0; i--) {
4243 		plane = &dm->dc->caps.planes[i];
4244 
4245 		if (initialize_plane(dm, mode_info, i,
4246 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4247 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4248 			goto fail;
4249 		}
4250 	}
4251 
4252 	/*
4253 	 * Initialize overlay planes, index starting after primary planes.
4254 	 * These planes have a higher DRM index than the primary planes since
4255 	 * they should be considered as having a higher z-order.
4256 	 * Order is reversed to match iteration order in atomic check.
4257 	 *
4258 	 * Only support DCN for now, and only expose one so we don't encourage
4259 	 * userspace to use up all the pipes.
4260 	 */
4261 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4262 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4263 
4264 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4265 			continue;
4266 
4267 		if (!plane->blends_with_above || !plane->blends_with_below)
4268 			continue;
4269 
4270 		if (!plane->pixel_format_support.argb8888)
4271 			continue;
4272 
4273 		if (initialize_plane(dm, NULL, primary_planes + i,
4274 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4275 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4276 			goto fail;
4277 		}
4278 
4279 		/* Only create one overlay plane. */
4280 		break;
4281 	}
4282 
4283 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4284 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4285 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4286 			goto fail;
4287 		}
4288 
4289 	/* Use Outbox interrupt */
4290 	switch (adev->ip_versions[DCE_HWIP][0]) {
4291 	case IP_VERSION(3, 0, 0):
4292 	case IP_VERSION(3, 1, 2):
4293 	case IP_VERSION(3, 1, 3):
4294 	case IP_VERSION(3, 1, 5):
4295 	case IP_VERSION(3, 1, 6):
4296 	case IP_VERSION(2, 1, 0):
4297 		if (register_outbox_irq_handlers(dm->adev)) {
4298 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4299 			goto fail;
4300 		}
4301 		break;
4302 	default:
4303 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4304 			      adev->ip_versions[DCE_HWIP][0]);
4305 	}
4306 
4307 	/* Determine whether to enable PSR support by default. */
4308 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4309 		switch (adev->ip_versions[DCE_HWIP][0]) {
4310 		case IP_VERSION(3, 1, 2):
4311 		case IP_VERSION(3, 1, 3):
4312 		case IP_VERSION(3, 1, 5):
4313 		case IP_VERSION(3, 1, 6):
4314 			psr_feature_enabled = true;
4315 			break;
4316 		default:
4317 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4318 			break;
4319 		}
4320 	}
4321 
4322 	/* Loop over all connectors on the board */
4323 	for (i = 0; i < link_cnt; i++) {
4324 		struct dc_link *link = NULL;
4325 
4326 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4327 			DRM_ERROR(
4328 				"KMS: Cannot support more than %d display indexes\n",
4329 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4330 			continue;
4331 		}
4332 
4333 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4334 		if (!aconnector)
4335 			goto fail;
4336 
4337 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4338 		if (!aencoder)
4339 			goto fail;
4340 
4341 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4342 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4343 			goto fail;
4344 		}
4345 
4346 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4347 			DRM_ERROR("KMS: Failed to initialize connector\n");
4348 			goto fail;
4349 		}
4350 
4351 		link = dc_get_link_at_index(dm->dc, i);
4352 
4353 		if (!dc_link_detect_sink(link, &new_connection_type))
4354 			DRM_ERROR("KMS: Failed to detect connector\n");
4355 
4356 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4357 			emulated_link_detect(link);
4358 			amdgpu_dm_update_connector_after_detect(aconnector);
4359 
4360 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4361 			amdgpu_dm_update_connector_after_detect(aconnector);
4362 			register_backlight_device(dm, link);
4363 			if (dm->num_of_edps)
4364 				update_connector_ext_caps(aconnector);
4365 			if (psr_feature_enabled)
4366 				amdgpu_dm_set_psr_caps(link);
4367 
4368 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4369 			 * PSR is also supported.
4370 			 */
4371 			if (link->psr_settings.psr_feature_enabled)
4372 				adev_to_drm(adev)->vblank_disable_immediate = false;
4373 		}
4374 
4375 
4376 	}
4377 
4378 	/* Software is initialized. Now we can register interrupt handlers. */
4379 	switch (adev->asic_type) {
4380 #if defined(CONFIG_DRM_AMD_DC_SI)
4381 	case CHIP_TAHITI:
4382 	case CHIP_PITCAIRN:
4383 	case CHIP_VERDE:
4384 	case CHIP_OLAND:
4385 		if (dce60_register_irq_handlers(dm->adev)) {
4386 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4387 			goto fail;
4388 		}
4389 		break;
4390 #endif
4391 	case CHIP_BONAIRE:
4392 	case CHIP_HAWAII:
4393 	case CHIP_KAVERI:
4394 	case CHIP_KABINI:
4395 	case CHIP_MULLINS:
4396 	case CHIP_TONGA:
4397 	case CHIP_FIJI:
4398 	case CHIP_CARRIZO:
4399 	case CHIP_STONEY:
4400 	case CHIP_POLARIS11:
4401 	case CHIP_POLARIS10:
4402 	case CHIP_POLARIS12:
4403 	case CHIP_VEGAM:
4404 	case CHIP_VEGA10:
4405 	case CHIP_VEGA12:
4406 	case CHIP_VEGA20:
4407 		if (dce110_register_irq_handlers(dm->adev)) {
4408 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4409 			goto fail;
4410 		}
4411 		break;
4412 	default:
4413 		switch (adev->ip_versions[DCE_HWIP][0]) {
4414 		case IP_VERSION(1, 0, 0):
4415 		case IP_VERSION(1, 0, 1):
4416 		case IP_VERSION(2, 0, 2):
4417 		case IP_VERSION(2, 0, 3):
4418 		case IP_VERSION(2, 0, 0):
4419 		case IP_VERSION(2, 1, 0):
4420 		case IP_VERSION(3, 0, 0):
4421 		case IP_VERSION(3, 0, 2):
4422 		case IP_VERSION(3, 0, 3):
4423 		case IP_VERSION(3, 0, 1):
4424 		case IP_VERSION(3, 1, 2):
4425 		case IP_VERSION(3, 1, 3):
4426 		case IP_VERSION(3, 1, 5):
4427 		case IP_VERSION(3, 1, 6):
4428 			if (dcn10_register_irq_handlers(dm->adev)) {
4429 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4430 				goto fail;
4431 			}
4432 			break;
4433 		default:
4434 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4435 					adev->ip_versions[DCE_HWIP][0]);
4436 			goto fail;
4437 		}
4438 		break;
4439 	}
4440 
4441 	return 0;
4442 fail:
4443 	kfree(aencoder);
4444 	kfree(aconnector);
4445 
4446 	return -EINVAL;
4447 }
4448 
4449 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4450 {
4451 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4452 	return;
4453 }
4454 
4455 /******************************************************************************
4456  * amdgpu_display_funcs functions
4457  *****************************************************************************/
4458 
4459 /*
4460  * dm_bandwidth_update - program display watermarks
4461  *
4462  * @adev: amdgpu_device pointer
4463  *
4464  * Calculate and program the display watermarks and line buffer allocation.
4465  */
4466 static void dm_bandwidth_update(struct amdgpu_device *adev)
4467 {
4468 	/* TODO: implement later */
4469 }
4470 
4471 static const struct amdgpu_display_funcs dm_display_funcs = {
4472 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4473 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4474 	.backlight_set_level = NULL, /* never called for DC */
4475 	.backlight_get_level = NULL, /* never called for DC */
4476 	.hpd_sense = NULL,/* called unconditionally */
4477 	.hpd_set_polarity = NULL, /* called unconditionally */
4478 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4479 	.page_flip_get_scanoutpos =
4480 		dm_crtc_get_scanoutpos,/* called unconditionally */
4481 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4482 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4483 };
4484 
4485 #if defined(CONFIG_DEBUG_KERNEL_DC)
4486 
4487 static ssize_t s3_debug_store(struct device *device,
4488 			      struct device_attribute *attr,
4489 			      const char *buf,
4490 			      size_t count)
4491 {
4492 	int ret;
4493 	int s3_state;
4494 	struct drm_device *drm_dev = dev_get_drvdata(device);
4495 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4496 
4497 	ret = kstrtoint(buf, 0, &s3_state);
4498 
4499 	if (ret == 0) {
4500 		if (s3_state) {
4501 			dm_resume(adev);
4502 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4503 		} else
4504 			dm_suspend(adev);
4505 	}
4506 
4507 	return ret == 0 ? count : 0;
4508 }
4509 
4510 DEVICE_ATTR_WO(s3_debug);
4511 
4512 #endif
4513 
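/*
 * Set the number of CRTCs, HPD pins and DIG encoders for the ASIC and hook
 * up the DM display and IRQ function tables.
 */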
4514 static int dm_early_init(void *handle)
4515 {
4516 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4517 
4518 	switch (adev->asic_type) {
4519 #if defined(CONFIG_DRM_AMD_DC_SI)
4520 	case CHIP_TAHITI:
4521 	case CHIP_PITCAIRN:
4522 	case CHIP_VERDE:
4523 		adev->mode_info.num_crtc = 6;
4524 		adev->mode_info.num_hpd = 6;
4525 		adev->mode_info.num_dig = 6;
4526 		break;
4527 	case CHIP_OLAND:
4528 		adev->mode_info.num_crtc = 2;
4529 		adev->mode_info.num_hpd = 2;
4530 		adev->mode_info.num_dig = 2;
4531 		break;
4532 #endif
4533 	case CHIP_BONAIRE:
4534 	case CHIP_HAWAII:
4535 		adev->mode_info.num_crtc = 6;
4536 		adev->mode_info.num_hpd = 6;
4537 		adev->mode_info.num_dig = 6;
4538 		break;
4539 	case CHIP_KAVERI:
4540 		adev->mode_info.num_crtc = 4;
4541 		adev->mode_info.num_hpd = 6;
4542 		adev->mode_info.num_dig = 7;
4543 		break;
4544 	case CHIP_KABINI:
4545 	case CHIP_MULLINS:
4546 		adev->mode_info.num_crtc = 2;
4547 		adev->mode_info.num_hpd = 6;
4548 		adev->mode_info.num_dig = 6;
4549 		break;
4550 	case CHIP_FIJI:
4551 	case CHIP_TONGA:
4552 		adev->mode_info.num_crtc = 6;
4553 		adev->mode_info.num_hpd = 6;
4554 		adev->mode_info.num_dig = 7;
4555 		break;
4556 	case CHIP_CARRIZO:
4557 		adev->mode_info.num_crtc = 3;
4558 		adev->mode_info.num_hpd = 6;
4559 		adev->mode_info.num_dig = 9;
4560 		break;
4561 	case CHIP_STONEY:
4562 		adev->mode_info.num_crtc = 2;
4563 		adev->mode_info.num_hpd = 6;
4564 		adev->mode_info.num_dig = 9;
4565 		break;
4566 	case CHIP_POLARIS11:
4567 	case CHIP_POLARIS12:
4568 		adev->mode_info.num_crtc = 5;
4569 		adev->mode_info.num_hpd = 5;
4570 		adev->mode_info.num_dig = 5;
4571 		break;
4572 	case CHIP_POLARIS10:
4573 	case CHIP_VEGAM:
4574 		adev->mode_info.num_crtc = 6;
4575 		adev->mode_info.num_hpd = 6;
4576 		adev->mode_info.num_dig = 6;
4577 		break;
4578 	case CHIP_VEGA10:
4579 	case CHIP_VEGA12:
4580 	case CHIP_VEGA20:
4581 		adev->mode_info.num_crtc = 6;
4582 		adev->mode_info.num_hpd = 6;
4583 		adev->mode_info.num_dig = 6;
4584 		break;
4585 	default:
4586 
4587 		switch (adev->ip_versions[DCE_HWIP][0]) {
4588 		case IP_VERSION(2, 0, 2):
4589 		case IP_VERSION(3, 0, 0):
4590 			adev->mode_info.num_crtc = 6;
4591 			adev->mode_info.num_hpd = 6;
4592 			adev->mode_info.num_dig = 6;
4593 			break;
4594 		case IP_VERSION(2, 0, 0):
4595 		case IP_VERSION(3, 0, 2):
4596 			adev->mode_info.num_crtc = 5;
4597 			adev->mode_info.num_hpd = 5;
4598 			adev->mode_info.num_dig = 5;
4599 			break;
4600 		case IP_VERSION(2, 0, 3):
4601 		case IP_VERSION(3, 0, 3):
4602 			adev->mode_info.num_crtc = 2;
4603 			adev->mode_info.num_hpd = 2;
4604 			adev->mode_info.num_dig = 2;
4605 			break;
4606 		case IP_VERSION(1, 0, 0):
4607 		case IP_VERSION(1, 0, 1):
4608 		case IP_VERSION(3, 0, 1):
4609 		case IP_VERSION(2, 1, 0):
4610 		case IP_VERSION(3, 1, 2):
4611 		case IP_VERSION(3, 1, 3):
4612 		case IP_VERSION(3, 1, 5):
4613 		case IP_VERSION(3, 1, 6):
4614 			adev->mode_info.num_crtc = 4;
4615 			adev->mode_info.num_hpd = 4;
4616 			adev->mode_info.num_dig = 4;
4617 			break;
4618 		default:
4619 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4620 					adev->ip_versions[DCE_HWIP][0]);
4621 			return -EINVAL;
4622 		}
4623 		break;
4624 	}
4625 
4626 	amdgpu_dm_set_irq_funcs(adev);
4627 
4628 	if (adev->mode_info.funcs == NULL)
4629 		adev->mode_info.funcs = &dm_display_funcs;
4630 
4631 	/*
4632 	 * Note: Do NOT change adev->audio_endpt_rreg and
4633 	 * adev->audio_endpt_wreg because they are initialised in
4634 	 * amdgpu_device_init()
4635 	 */
4636 #if defined(CONFIG_DEBUG_KERNEL_DC)
4637 	device_create_file(
4638 		adev_to_drm(adev)->dev,
4639 		&dev_attr_s3_debug);
4640 #endif
4641 
4642 	return 0;
4643 }
4644 
4645 static bool modeset_required(struct drm_crtc_state *crtc_state,
4646 			     struct dc_stream_state *new_stream,
4647 			     struct dc_stream_state *old_stream)
4648 {
4649 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4650 }
4651 
4652 static bool modereset_required(struct drm_crtc_state *crtc_state)
4653 {
4654 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4655 }
4656 
4657 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4658 {
4659 	drm_encoder_cleanup(encoder);
4660 	kfree(encoder);
4661 }
4662 
4663 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4664 	.destroy = amdgpu_dm_encoder_destroy,
4665 };
4666 
4667 
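/*
 * Look up the per-format scaling limits from the DC plane caps, expressed
 * in units where 1000 == a 1.0 scaling factor.
 */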
4668 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4669 					 struct drm_framebuffer *fb,
4670 					 int *min_downscale, int *max_upscale)
4671 {
4672 	struct amdgpu_device *adev = drm_to_adev(dev);
4673 	struct dc *dc = adev->dm.dc;
4674 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4675 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4676 
4677 	switch (fb->format->format) {
4678 	case DRM_FORMAT_P010:
4679 	case DRM_FORMAT_NV12:
4680 	case DRM_FORMAT_NV21:
4681 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4682 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4683 		break;
4684 
4685 	case DRM_FORMAT_XRGB16161616F:
4686 	case DRM_FORMAT_ARGB16161616F:
4687 	case DRM_FORMAT_XBGR16161616F:
4688 	case DRM_FORMAT_ABGR16161616F:
4689 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4690 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4691 		break;
4692 
4693 	default:
4694 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4695 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4696 		break;
4697 	}
4698 
4699 	/*
4700 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4701 	 * scaling factor of 1.0 == 1000 units.
4702 	 */
4703 	if (*max_upscale == 1)
4704 		*max_upscale = 1000;
4705 
4706 	if (*min_downscale == 1)
4707 		*min_downscale = 1000;
4708 }
4709 
4710 
4711 static int fill_dc_scaling_info(struct amdgpu_device *adev,
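/*
 * Translate the DRM plane state source and destination rectangles into a DC
 * scaling_info and validate the resulting scale factors against the plane
 * caps.
 */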
4712 				const struct drm_plane_state *state,
4713 				struct dc_scaling_info *scaling_info)
4714 {
4715 	int scale_w, scale_h, min_downscale, max_upscale;
4716 
4717 	memset(scaling_info, 0, sizeof(*scaling_info));
4718 
4719 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4720 	scaling_info->src_rect.x = state->src_x >> 16;
4721 	scaling_info->src_rect.y = state->src_y >> 16;
4722 
4723 	/*
4724 	 * For reasons we don't (yet) fully understand, a non-zero
4725 	 * src_y coordinate into an NV12 buffer can cause a
4726 	 * system hang on DCN1x.
4727 	 * To avoid hangs (at the risk of being overly cautious),
4728 	 * let's reject both non-zero src_x and src_y.
4729 	 *
4730 	 * We currently know of only one use-case to reproduce a
4731 	 * scenario with non-zero src_x and src_y for NV12, which
4732 	 * is to gesture the YouTube Android app into full screen
4733 	 * on ChromeOS.
4734 	 */
4735 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4736 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4737 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4738 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4739 		return -EINVAL;
4740 
4741 	scaling_info->src_rect.width = state->src_w >> 16;
4742 	if (scaling_info->src_rect.width == 0)
4743 		return -EINVAL;
4744 
4745 	scaling_info->src_rect.height = state->src_h >> 16;
4746 	if (scaling_info->src_rect.height == 0)
4747 		return -EINVAL;
4748 
4749 	scaling_info->dst_rect.x = state->crtc_x;
4750 	scaling_info->dst_rect.y = state->crtc_y;
4751 
4752 	if (state->crtc_w == 0)
4753 		return -EINVAL;
4754 
4755 	scaling_info->dst_rect.width = state->crtc_w;
4756 
4757 	if (state->crtc_h == 0)
4758 		return -EINVAL;
4759 
4760 	scaling_info->dst_rect.height = state->crtc_h;
4761 
4762 	/* DRM doesn't specify clipping on destination output. */
4763 	scaling_info->clip_rect = scaling_info->dst_rect;
4764 
4765 	/* Validate scaling per-format with DC plane caps */
4766 	if (state->plane && state->plane->dev && state->fb) {
4767 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4768 					     &min_downscale, &max_upscale);
4769 	} else {
4770 		min_downscale = 250;
4771 		max_upscale = 16000;
4772 	}
4773 
4774 	scale_w = scaling_info->dst_rect.width * 1000 /
4775 		  scaling_info->src_rect.width;
4776 
4777 	if (scale_w < min_downscale || scale_w > max_upscale)
4778 		return -EINVAL;
4779 
4780 	scale_h = scaling_info->dst_rect.height * 1000 /
4781 		  scaling_info->src_rect.height;
4782 
4783 	if (scale_h < min_downscale || scale_h > max_upscale)
4784 		return -EINVAL;
4785 
4786 	/*
4787 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4788 	 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4789 	 * assume reasonable defaults based on the format.
4790 
4791 	return 0;
4792 }
4793 
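/* Translate legacy AMDGPU_TILING flags into GFX8 dc_tiling_info parameters. */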
4794 static void
4795 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4796 				 uint64_t tiling_flags)
4797 {
4798 	/* Fill GFX8 params */
4799 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4800 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4801 
4802 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4803 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4804 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4805 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4806 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4807 
4808 		/* XXX fix me for VI */
4809 		tiling_info->gfx8.num_banks = num_banks;
4810 		tiling_info->gfx8.array_mode =
4811 				DC_ARRAY_2D_TILED_THIN1;
4812 		tiling_info->gfx8.tile_split = tile_split;
4813 		tiling_info->gfx8.bank_width = bankw;
4814 		tiling_info->gfx8.bank_height = bankh;
4815 		tiling_info->gfx8.tile_aspect = mtaspect;
4816 		tiling_info->gfx8.tile_mode =
4817 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4818 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4819 			== DC_ARRAY_1D_TILED_THIN1) {
4820 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4821 	}
4822 
4823 	tiling_info->gfx8.pipe_config =
4824 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4825 }
4826 
4827 static void
4828 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4829 				  union dc_tiling_info *tiling_info)
4830 {
4831 	tiling_info->gfx9.num_pipes =
4832 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4833 	tiling_info->gfx9.num_banks =
4834 		adev->gfx.config.gb_addr_config_fields.num_banks;
4835 	tiling_info->gfx9.pipe_interleave =
4836 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4837 	tiling_info->gfx9.num_shader_engines =
4838 		adev->gfx.config.gb_addr_config_fields.num_se;
4839 	tiling_info->gfx9.max_compressed_frags =
4840 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4841 	tiling_info->gfx9.num_rb_per_se =
4842 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4843 	tiling_info->gfx9.shaderEnable = 1;
4844 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4845 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4846 }
4847 
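/*
 * Check a requested DCC configuration against the compression capabilities
 * DC reports for this format, surface size and swizzle mode.
 */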
4848 static int
4849 validate_dcc(struct amdgpu_device *adev,
4850 	     const enum surface_pixel_format format,
4851 	     const enum dc_rotation_angle rotation,
4852 	     const union dc_tiling_info *tiling_info,
4853 	     const struct dc_plane_dcc_param *dcc,
4854 	     const struct dc_plane_address *address,
4855 	     const struct plane_size *plane_size)
4856 {
4857 	struct dc *dc = adev->dm.dc;
4858 	struct dc_dcc_surface_param input;
4859 	struct dc_surface_dcc_cap output;
4860 
4861 	memset(&input, 0, sizeof(input));
4862 	memset(&output, 0, sizeof(output));
4863 
4864 	if (!dcc->enable)
4865 		return 0;
4866 
4867 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4868 	    !dc->cap_funcs.get_dcc_compression_cap)
4869 		return -EINVAL;
4870 
4871 	input.format = format;
4872 	input.surface_size.width = plane_size->surface_size.width;
4873 	input.surface_size.height = plane_size->surface_size.height;
4874 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4875 
4876 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4877 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4878 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4879 		input.scan = SCAN_DIRECTION_VERTICAL;
4880 
4881 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4882 		return -EINVAL;
4883 
4884 	if (!output.capable)
4885 		return -EINVAL;
4886 
4887 	if (dcc->independent_64b_blks == 0 &&
4888 	    output.grph.rgb.independent_64b_blks != 0)
4889 		return -EINVAL;
4890 
4891 	return 0;
4892 }
4893 
4894 static bool
4895 modifier_has_dcc(uint64_t modifier)
4896 {
4897 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4898 }
4899 
4900 static unsigned
4901 modifier_gfx9_swizzle_mode(uint64_t modifier)
4902 {
4903 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4904 		return 0;
4905 
4906 	return AMD_FMT_MOD_GET(TILE, modifier);
4907 }
4908 
4909 static const struct drm_format_info *
4910 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4911 {
4912 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4913 }
4914 
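/*
 * Fill the GFX9+ tiling parameters, overriding the device defaults with the
 * pipe/bank/packer counts encoded in an AMD format modifier.
 */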
4915 static void
4916 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4917 				    union dc_tiling_info *tiling_info,
4918 				    uint64_t modifier)
4919 {
4920 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4921 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4922 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4923 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4924 
4925 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4926 
4927 	if (!IS_AMD_FMT_MOD(modifier))
4928 		return;
4929 
4930 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4931 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4932 
4933 	if (adev->family >= AMDGPU_FAMILY_NV) {
4934 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4935 	} else {
4936 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4937 
4938 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4939 	}
4940 }
4941 
4942 enum dm_micro_swizzle {
4943 	MICRO_SWIZZLE_Z = 0,
4944 	MICRO_SWIZZLE_S = 1,
4945 	MICRO_SWIZZLE_D = 2,
4946 	MICRO_SWIZZLE_R = 3
4947 };
4948 
4949 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4950 					  uint32_t format,
4951 					  uint64_t modifier)
4952 {
4953 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4954 	const struct drm_format_info *info = drm_format_info(format);
4955 	int i;
4956 
4957 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4958 
4959 	if (!info)
4960 		return false;
4961 
4962 	/*
4963 	 * We always have to allow these modifiers:
4964 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4965 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4966 	 */
4967 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4968 	    modifier == DRM_FORMAT_MOD_INVALID) {
4969 		return true;
4970 	}
4971 
4972 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4973 	for (i = 0; i < plane->modifier_count; i++) {
4974 		if (modifier == plane->modifiers[i])
4975 			break;
4976 	}
4977 	if (i == plane->modifier_count)
4978 		return false;
4979 
4980 	/*
4981 	 * For D swizzle the canonical modifier depends on the bpp, so check
4982 	 * it here.
4983 	 */
4984 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4985 	    adev->family >= AMDGPU_FAMILY_NV) {
4986 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4987 			return false;
4988 	}
4989 
4990 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4991 	    info->cpp[0] < 8)
4992 		return false;
4993 
4994 	if (modifier_has_dcc(modifier)) {
4995 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4996 		if (info->cpp[0] != 4)
4997 			return false;
4998 		/* We support multi-planar formats, but not when combined with
4999 		 * additional DCC metadata planes. */
5000 		if (info->num_planes > 1)
5001 			return false;
5002 	}
5003 
5004 	return true;
5005 }
5006 
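/* Append a modifier to a dynamically grown array, doubling its capacity as needed. */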
5007 static void
5008 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5009 {
5010 	if (!*mods)
5011 		return;
5012 
5013 	if (*cap - *size < 1) {
5014 		uint64_t new_cap = *cap * 2;
5015 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5016 
5017 		if (!new_mods) {
5018 			kfree(*mods);
5019 			*mods = NULL;
5020 			return;
5021 		}
5022 
5023 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5024 		kfree(*mods);
5025 		*mods = new_mods;
5026 		*cap = new_cap;
5027 	}
5028 
5029 	(*mods)[*size] = mod;
5030 	*size += 1;
5031 }
5032 
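/*
 * Build the list of scanout modifiers advertised for GFX9 (Vega/Raven)
 * parts; DCC variants are only advertised for the Raven family.
 */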
5033 static void
5034 add_gfx9_modifiers(const struct amdgpu_device *adev,
5035 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5036 {
5037 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5038 	int pipe_xor_bits = min(8, pipes +
5039 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5040 	int bank_xor_bits = min(8 - pipe_xor_bits,
5041 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5042 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5043 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5044 
5045 
5046 	if (adev->family == AMDGPU_FAMILY_RV) {
5047 		/* Raven2 and later */
5048 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5049 
5050 		/*
5051 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5052 		 * doesn't support _D on DCN
5053 		 */
5054 
5055 		if (has_constant_encode) {
5056 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5057 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5058 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5059 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5060 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5061 				    AMD_FMT_MOD_SET(DCC, 1) |
5062 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5063 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5064 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5065 		}
5066 
5067 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5069 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5070 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5071 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5072 			    AMD_FMT_MOD_SET(DCC, 1) |
5073 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5074 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5075 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5076 
5077 		if (has_constant_encode) {
5078 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5080 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5081 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5082 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5083 				    AMD_FMT_MOD_SET(DCC, 1) |
5084 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5085 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5086 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5087 
5088 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5089 				    AMD_FMT_MOD_SET(RB, rb) |
5090 				    AMD_FMT_MOD_SET(PIPE, pipes));
5091 		}
5092 
5093 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5094 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5095 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5096 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5097 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5098 			    AMD_FMT_MOD_SET(DCC, 1) |
5099 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5100 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5101 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5102 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5103 			    AMD_FMT_MOD_SET(RB, rb) |
5104 			    AMD_FMT_MOD_SET(PIPE, pipes));
5105 	}
5106 
5107 	/*
5108 	 * Only supported for 64bpp on Raven, will be filtered on format in
5109 	 * dm_plane_format_mod_supported.
5110 	 */
5111 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5112 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5113 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5114 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5115 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5116 
5117 	if (adev->family == AMDGPU_FAMILY_RV) {
5118 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5119 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5120 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5121 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5122 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5123 	}
5124 
5125 	/*
5126 	 * Only supported for 64bpp on Raven, will be filtered on format in
5127 	 * dm_plane_format_mod_supported.
5128 	 */
5129 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5130 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5131 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5132 
5133 	if (adev->family == AMDGPU_FAMILY_RV) {
5134 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5135 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5136 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5137 	}
5138 }
5139 
5140 static void
5141 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5142 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5143 {
5144 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5145 
5146 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5147 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5148 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5149 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5150 		    AMD_FMT_MOD_SET(DCC, 1) |
5151 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5152 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5153 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5154 
5155 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5156 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5157 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5158 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5159 		    AMD_FMT_MOD_SET(DCC, 1) |
5160 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5161 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5162 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5163 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5164 
5165 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5166 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5167 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5168 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5169 
5170 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5171 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5172 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5173 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5174 
5175 
5176 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5177 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5178 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5179 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5180 
5181 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5182 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5183 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5184 }
5185 
5186 static void
5187 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5188 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5189 {
5190 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5191 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
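	/* Note (assumption): GFX10.3 (RB+) swizzles additionally encode the packer count. */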
5192 
5193 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5194 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5195 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5196 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5197 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5198 		    AMD_FMT_MOD_SET(DCC, 1) |
5199 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5200 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5201 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5202 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5203 
5204 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5205 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5206 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5207 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5208 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5209 		    AMD_FMT_MOD_SET(DCC, 1) |
5210 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5211 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5212 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5213 
5214 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5215 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5216 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5217 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5218 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5219 		    AMD_FMT_MOD_SET(DCC, 1) |
5220 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5221 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5222 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5223 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5224 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5225 
5226 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5227 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5228 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5229 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5230 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5231 		    AMD_FMT_MOD_SET(DCC, 1) |
5232 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5233 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5234 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5235 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5236 
5237 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5238 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5239 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5240 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5241 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5242 
5243 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5244 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5245 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5246 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5247 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5248 
5249 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5250 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5251 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5252 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5253 
5254 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5255 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5256 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5257 }
5258 
5259 static int
5260 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5261 {
5262 	uint64_t size = 0, capacity = 128;
5263 	*mods = NULL;
5264 
5265 	/* We have not hooked up any pre-GFX9 modifiers. */
5266 	if (adev->family < AMDGPU_FAMILY_AI)
5267 		return 0;
5268 
5269 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5270 
5271 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
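		/* Cursor planes only take linear buffers, so terminate the list right away. */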
5272 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5273 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5274 		return *mods ? 0 : -ENOMEM;
5275 	}
5276 
5277 	switch (adev->family) {
5278 	case AMDGPU_FAMILY_AI:
5279 	case AMDGPU_FAMILY_RV:
5280 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5281 		break;
5282 	case AMDGPU_FAMILY_NV:
5283 	case AMDGPU_FAMILY_VGH:
5284 	case AMDGPU_FAMILY_YC:
5285 	case AMDGPU_FAMILY_GC_10_3_6:
5286 	case AMDGPU_FAMILY_GC_10_3_7:
5287 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5288 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5289 		else
5290 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5291 		break;
5292 	}
5293 
5294 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5295 
5296 	/* INVALID marks the end of the list. */
5297 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5298 
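	/*
	 * add_modifier() frees and NULLs *mods on allocation failure, so a
	 * single check here covers all of the additions above.
	 */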
5299 	if (!*mods)
5300 		return -ENOMEM;
5301 
5302 	return 0;
5303 }
5304 
5305 static int
5306 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5307 					  const struct amdgpu_framebuffer *afb,
5308 					  const enum surface_pixel_format format,
5309 					  const enum dc_rotation_angle rotation,
5310 					  const struct plane_size *plane_size,
5311 					  union dc_tiling_info *tiling_info,
5312 					  struct dc_plane_dcc_param *dcc,
5313 					  struct dc_plane_address *address,
5314 					  const bool force_disable_dcc)
5315 {
5316 	const uint64_t modifier = afb->base.modifier;
5317 	int ret = 0;
5318 
5319 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5320 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5321 
5322 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
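		/* DCC metadata is carried in the second FB plane (offsets[1]/pitches[1]). */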
5323 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5324 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5325 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5326 
5327 		dcc->enable = 1;
5328 		dcc->meta_pitch = afb->base.pitches[1];
5329 		dcc->independent_64b_blks = independent_64b_blks;
5330 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5331 			if (independent_64b_blks && independent_128b_blks)
5332 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5333 			else if (independent_128b_blks)
5334 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5335 			else if (independent_64b_blks && !independent_128b_blks)
5336 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5337 			else
5338 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5339 		} else {
5340 			if (independent_64b_blks)
5341 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5342 			else
5343 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5344 		}
5345 
5346 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5347 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5348 	}
5349 
5350 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5351 	if (ret)
5352 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5353 
5354 	return ret;
5355 }
5356 
5357 static int
5358 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5359 			     const struct amdgpu_framebuffer *afb,
5360 			     const enum surface_pixel_format format,
5361 			     const enum dc_rotation_angle rotation,
5362 			     const uint64_t tiling_flags,
5363 			     union dc_tiling_info *tiling_info,
5364 			     struct plane_size *plane_size,
5365 			     struct dc_plane_dcc_param *dcc,
5366 			     struct dc_plane_address *address,
5367 			     bool tmz_surface,
5368 			     bool force_disable_dcc)
5369 {
5370 	const struct drm_framebuffer *fb = &afb->base;
5371 	int ret;
5372 
5373 	memset(tiling_info, 0, sizeof(*tiling_info));
5374 	memset(plane_size, 0, sizeof(*plane_size));
5375 	memset(dcc, 0, sizeof(*dcc));
5376 	memset(address, 0, sizeof(*address));
5377 
5378 	address->tmz_surface = tmz_surface;
5379 
5380 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5381 		uint64_t addr = afb->address + fb->offsets[0];
5382 
5383 		plane_size->surface_size.x = 0;
5384 		plane_size->surface_size.y = 0;
5385 		plane_size->surface_size.width = fb->width;
5386 		plane_size->surface_size.height = fb->height;
5387 		plane_size->surface_pitch =
5388 			fb->pitches[0] / fb->format->cpp[0];
5389 
5390 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5391 		address->grph.addr.low_part = lower_32_bits(addr);
5392 		address->grph.addr.high_part = upper_32_bits(addr);
5393 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5394 		uint64_t luma_addr = afb->address + fb->offsets[0];
5395 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5396 
5397 		plane_size->surface_size.x = 0;
5398 		plane_size->surface_size.y = 0;
5399 		plane_size->surface_size.width = fb->width;
5400 		plane_size->surface_size.height = fb->height;
5401 		plane_size->surface_pitch =
5402 			fb->pitches[0] / fb->format->cpp[0];
5403 
5404 		plane_size->chroma_size.x = 0;
5405 		plane_size->chroma_size.y = 0;
5406 		/* TODO: set these based on surface format */
5407 		plane_size->chroma_size.width = fb->width / 2;
5408 		plane_size->chroma_size.height = fb->height / 2;
5409 
5410 		plane_size->chroma_pitch =
5411 			fb->pitches[1] / fb->format->cpp[1];
5412 
5413 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5414 		address->video_progressive.luma_addr.low_part =
5415 			lower_32_bits(luma_addr);
5416 		address->video_progressive.luma_addr.high_part =
5417 			upper_32_bits(luma_addr);
5418 		address->video_progressive.chroma_addr.low_part =
5419 			lower_32_bits(chroma_addr);
5420 		address->video_progressive.chroma_addr.high_part =
5421 			upper_32_bits(chroma_addr);
5422 	}
5423 
5424 	if (adev->family >= AMDGPU_FAMILY_AI) {
5425 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5426 								rotation, plane_size,
5427 								tiling_info, dcc,
5428 								address,
5429 								force_disable_dcc);
5430 		if (ret)
5431 			return ret;
5432 	} else {
5433 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5434 	}
5435 
5436 	return 0;
5437 }
5438 
5439 static void
5440 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5441 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5442 			       bool *global_alpha, int *global_alpha_value)
5443 {
5444 	*per_pixel_alpha = false;
5445 	*pre_multiplied_alpha = true;
5446 	*global_alpha = false;
5447 	*global_alpha_value = 0xff;
5448 
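	/* Blending properties are only honoured for overlay planes; others keep the defaults above. */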
5449 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5450 		return;
5451 
5452 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5453 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5454 		static const uint32_t alpha_formats[] = {
5455 			DRM_FORMAT_ARGB8888,
5456 			DRM_FORMAT_RGBA8888,
5457 			DRM_FORMAT_ABGR8888,
5458 		};
5459 		uint32_t format = plane_state->fb->format->format;
5460 		unsigned int i;
5461 
5462 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5463 			if (format == alpha_formats[i]) {
5464 				*per_pixel_alpha = true;
5465 				break;
5466 			}
5467 		}
5468 
5469 		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5470 			*pre_multiplied_alpha = false;
5471 	}
5472 
5473 	if (plane_state->alpha < 0xffff) {
5474 		*global_alpha = true;
5475 		*global_alpha_value = plane_state->alpha >> 8;
5476 	}
5477 }
5478 
5479 static int
5480 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5481 			    const enum surface_pixel_format format,
5482 			    enum dc_color_space *color_space)
5483 {
5484 	bool full_range;
5485 
5486 	*color_space = COLOR_SPACE_SRGB;
5487 
5488 	/* DRM color properties only affect non-RGB formats. */
5489 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5490 		return 0;
5491 
5492 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5493 
5494 	switch (plane_state->color_encoding) {
5495 	case DRM_COLOR_YCBCR_BT601:
5496 		if (full_range)
5497 			*color_space = COLOR_SPACE_YCBCR601;
5498 		else
5499 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5500 		break;
5501 
5502 	case DRM_COLOR_YCBCR_BT709:
5503 		if (full_range)
5504 			*color_space = COLOR_SPACE_YCBCR709;
5505 		else
5506 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5507 		break;
5508 
5509 	case DRM_COLOR_YCBCR_BT2020:
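		/* Only full-range BT.2020 YCbCr is handled here; limited range is rejected below. */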
5510 		if (full_range)
5511 			*color_space = COLOR_SPACE_2020_YCBCR;
5512 		else
5513 			return -EINVAL;
5514 		break;
5515 
5516 	default:
5517 		return -EINVAL;
5518 	}
5519 
5520 	return 0;
5521 }
5522 
5523 static int
5524 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5525 			    const struct drm_plane_state *plane_state,
5526 			    const uint64_t tiling_flags,
5527 			    struct dc_plane_info *plane_info,
5528 			    struct dc_plane_address *address,
5529 			    bool tmz_surface,
5530 			    bool force_disable_dcc)
5531 {
5532 	const struct drm_framebuffer *fb = plane_state->fb;
5533 	const struct amdgpu_framebuffer *afb =
5534 		to_amdgpu_framebuffer(plane_state->fb);
5535 	int ret;
5536 
5537 	memset(plane_info, 0, sizeof(*plane_info));
5538 
5539 	switch (fb->format->format) {
5540 	case DRM_FORMAT_C8:
5541 		plane_info->format =
5542 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5543 		break;
5544 	case DRM_FORMAT_RGB565:
5545 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5546 		break;
5547 	case DRM_FORMAT_XRGB8888:
5548 	case DRM_FORMAT_ARGB8888:
5549 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5550 		break;
5551 	case DRM_FORMAT_XRGB2101010:
5552 	case DRM_FORMAT_ARGB2101010:
5553 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5554 		break;
5555 	case DRM_FORMAT_XBGR2101010:
5556 	case DRM_FORMAT_ABGR2101010:
5557 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5558 		break;
5559 	case DRM_FORMAT_XBGR8888:
5560 	case DRM_FORMAT_ABGR8888:
5561 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5562 		break;
5563 	case DRM_FORMAT_NV21:
5564 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5565 		break;
5566 	case DRM_FORMAT_NV12:
5567 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5568 		break;
5569 	case DRM_FORMAT_P010:
5570 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5571 		break;
5572 	case DRM_FORMAT_XRGB16161616F:
5573 	case DRM_FORMAT_ARGB16161616F:
5574 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5575 		break;
5576 	case DRM_FORMAT_XBGR16161616F:
5577 	case DRM_FORMAT_ABGR16161616F:
5578 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5579 		break;
5580 	case DRM_FORMAT_XRGB16161616:
5581 	case DRM_FORMAT_ARGB16161616:
5582 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5583 		break;
5584 	case DRM_FORMAT_XBGR16161616:
5585 	case DRM_FORMAT_ABGR16161616:
5586 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5587 		break;
5588 	default:
5589 		DRM_ERROR(
5590 			"Unsupported screen format %p4cc\n",
5591 			&fb->format->format);
5592 		return -EINVAL;
5593 	}
5594 
5595 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5596 	case DRM_MODE_ROTATE_0:
5597 		plane_info->rotation = ROTATION_ANGLE_0;
5598 		break;
5599 	case DRM_MODE_ROTATE_90:
5600 		plane_info->rotation = ROTATION_ANGLE_90;
5601 		break;
5602 	case DRM_MODE_ROTATE_180:
5603 		plane_info->rotation = ROTATION_ANGLE_180;
5604 		break;
5605 	case DRM_MODE_ROTATE_270:
5606 		plane_info->rotation = ROTATION_ANGLE_270;
5607 		break;
5608 	default:
5609 		plane_info->rotation = ROTATION_ANGLE_0;
5610 		break;
5611 	}
5612 
5613 	plane_info->visible = true;
5614 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5615 
5616 	plane_info->layer_index = 0;
5617 
5618 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5619 					  &plane_info->color_space);
5620 	if (ret)
5621 		return ret;
5622 
5623 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5624 					   plane_info->rotation, tiling_flags,
5625 					   &plane_info->tiling_info,
5626 					   &plane_info->plane_size,
5627 					   &plane_info->dcc, address, tmz_surface,
5628 					   force_disable_dcc);
5629 	if (ret)
5630 		return ret;
5631 
5632 	fill_blending_from_plane_state(
5633 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5634 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5635 
5636 	return 0;
5637 }
5638 
5639 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5640 				    struct dc_plane_state *dc_plane_state,
5641 				    struct drm_plane_state *plane_state,
5642 				    struct drm_crtc_state *crtc_state)
5643 {
5644 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5645 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5646 	struct dc_scaling_info scaling_info;
5647 	struct dc_plane_info plane_info;
5648 	int ret;
5649 	bool force_disable_dcc = false;
5650 
5651 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5652 	if (ret)
5653 		return ret;
5654 
5655 	dc_plane_state->src_rect = scaling_info.src_rect;
5656 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5657 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5658 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5659 
5660 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5661 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5662 					  afb->tiling_flags,
5663 					  &plane_info,
5664 					  &dc_plane_state->address,
5665 					  afb->tmz_surface,
5666 					  force_disable_dcc);
5667 	if (ret)
5668 		return ret;
5669 
5670 	dc_plane_state->format = plane_info.format;
5671 	dc_plane_state->color_space = plane_info.color_space;
5673 	dc_plane_state->plane_size = plane_info.plane_size;
5674 	dc_plane_state->rotation = plane_info.rotation;
5675 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5676 	dc_plane_state->stereo_format = plane_info.stereo_format;
5677 	dc_plane_state->tiling_info = plane_info.tiling_info;
5678 	dc_plane_state->visible = plane_info.visible;
5679 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5680 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5681 	dc_plane_state->global_alpha = plane_info.global_alpha;
5682 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5683 	dc_plane_state->dcc = plane_info.dcc;
5684 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5685 	dc_plane_state->flip_int_enabled = true;
5686 
5687 	/*
5688 	 * Always set input transfer function, since plane state is refreshed
5689 	 * every time.
5690 	 */
5691 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5692 	if (ret)
5693 		return ret;
5694 
5695 	return 0;
5696 }
5697 
5698 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5699 					   const struct dm_connector_state *dm_state,
5700 					   struct dc_stream_state *stream)
5701 {
5702 	enum amdgpu_rmx_type rmx_type;
5703 
5704 	struct rect src = { 0 }; /* viewport in composition space */
5705 	struct rect dst = { 0 }; /* stream addressable area */
5706 
5707 	/* no mode. nothing to be done */
5708 	if (!mode)
5709 		return;
5710 
5711 	/* Full screen scaling by default */
5712 	src.width = mode->hdisplay;
5713 	src.height = mode->vdisplay;
5714 	dst.width = stream->timing.h_addressable;
5715 	dst.height = stream->timing.v_addressable;
5716 
5717 	if (dm_state) {
5718 		rmx_type = dm_state->scaling;
5719 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5720 			if (src.width * dst.height <
5721 					src.height * dst.width) {
5722 				/* height needs less upscaling/more downscaling */
5723 				dst.width = src.width *
5724 						dst.height / src.height;
5725 			} else {
5726 				/* width needs less upscaling/more downscaling */
5727 				dst.height = src.height *
5728 						dst.width / src.width;
5729 			}
5730 		} else if (rmx_type == RMX_CENTER) {
5731 			dst = src;
5732 		}
5733 
5734 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5735 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5736 
5737 		if (dm_state->underscan_enable) {
5738 			dst.x += dm_state->underscan_hborder / 2;
5739 			dst.y += dm_state->underscan_vborder / 2;
5740 			dst.width -= dm_state->underscan_hborder;
5741 			dst.height -= dm_state->underscan_vborder;
5742 		}
5743 	}
5744 
5745 	stream->src = src;
5746 	stream->dst = dst;
5747 
5748 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5749 		      dst.x, dst.y, dst.width, dst.height);
5750 
5751 }
5752 
5753 static enum dc_color_depth
5754 convert_color_depth_from_display_info(const struct drm_connector *connector,
5755 				      bool is_y420, int requested_bpc)
5756 {
5757 	uint8_t bpc;
5758 
5759 	if (is_y420) {
5760 		bpc = 8;
5761 
5762 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5763 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5764 			bpc = 16;
5765 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5766 			bpc = 12;
5767 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5768 			bpc = 10;
5769 	} else {
5770 		bpc = (uint8_t)connector->display_info.bpc;
5771 		/* Assume 8 bpc by default if no bpc is specified. */
5772 		bpc = bpc ? bpc : 8;
5773 	}
5774 
5775 	if (requested_bpc > 0) {
5776 		/*
5777 		 * Cap display bpc based on the user requested value.
5778 		 *
5779 		 * The value for state->max_bpc may not be correctly updated
5780 		 * depending on when the connector gets added to the state
5781 		 * or if this was called outside of atomic check, so it
5782 		 * can't be used directly.
5783 		 */
5784 		bpc = min_t(u8, bpc, requested_bpc);
5785 
5786 		/* Round down to the nearest even number. */
5787 		bpc = bpc - (bpc & 1);
5788 	}
5789 
5790 	switch (bpc) {
5791 	case 0:
5792 		/*
5793 		 * Temporary workaround: DRM doesn't parse color depth for
5794 		 * EDID revisions before 1.4.
5795 		 * TODO: Fix edid parsing
5796 		 */
5797 		return COLOR_DEPTH_888;
5798 	case 6:
5799 		return COLOR_DEPTH_666;
5800 	case 8:
5801 		return COLOR_DEPTH_888;
5802 	case 10:
5803 		return COLOR_DEPTH_101010;
5804 	case 12:
5805 		return COLOR_DEPTH_121212;
5806 	case 14:
5807 		return COLOR_DEPTH_141414;
5808 	case 16:
5809 		return COLOR_DEPTH_161616;
5810 	default:
5811 		return COLOR_DEPTH_UNDEFINED;
5812 	}
5813 }
5814 
5815 static enum dc_aspect_ratio
5816 get_aspect_ratio(const struct drm_display_mode *mode_in)
5817 {
5818 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5819 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5820 }
5821 
5822 static enum dc_color_space
5823 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5824 {
5825 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5826 
5827 	switch (dc_crtc_timing->pixel_encoding)	{
5828 	case PIXEL_ENCODING_YCBCR422:
5829 	case PIXEL_ENCODING_YCBCR444:
5830 	case PIXEL_ENCODING_YCBCR420:
5831 	{
5832 		/*
5833 		 * 27030 kHz is the separation point between HDTV and SDTV
5834 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5835 		 * respectively.
5836 		 */
5837 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5838 			if (dc_crtc_timing->flags.Y_ONLY)
5839 				color_space =
5840 					COLOR_SPACE_YCBCR709_LIMITED;
5841 			else
5842 				color_space = COLOR_SPACE_YCBCR709;
5843 		} else {
5844 			if (dc_crtc_timing->flags.Y_ONLY)
5845 				color_space =
5846 					COLOR_SPACE_YCBCR601_LIMITED;
5847 			else
5848 				color_space = COLOR_SPACE_YCBCR601;
5849 		}
5850 
5851 	}
5852 	break;
5853 	case PIXEL_ENCODING_RGB:
5854 		color_space = COLOR_SPACE_SRGB;
5855 		break;
5856 
5857 	default:
5858 		WARN_ON(1);
5859 		break;
5860 	}
5861 
5862 	return color_space;
5863 }
5864 
5865 static bool adjust_colour_depth_from_display_info(
5866 	struct dc_crtc_timing *timing_out,
5867 	const struct drm_display_info *info)
5868 {
5869 	enum dc_color_depth depth = timing_out->display_color_depth;
5870 	int normalized_clk;
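	/*
	 * Walk down from the current colour depth until the depth-scaled
	 * pixel clock fits within the sink's max TMDS clock.
	 */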
5871 	do {
5872 		normalized_clk = timing_out->pix_clk_100hz / 10;
5873 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5874 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5875 			normalized_clk /= 2;
5876 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
5877 		switch (depth) {
5878 		case COLOR_DEPTH_888:
5879 			break;
5880 		case COLOR_DEPTH_101010:
5881 			normalized_clk = (normalized_clk * 30) / 24;
5882 			break;
5883 		case COLOR_DEPTH_121212:
5884 			normalized_clk = (normalized_clk * 36) / 24;
5885 			break;
5886 		case COLOR_DEPTH_161616:
5887 			normalized_clk = (normalized_clk * 48) / 24;
5888 			break;
5889 		default:
5890 			/* The above depths are the only ones valid for HDMI. */
5891 			return false;
5892 		}
5893 		if (normalized_clk <= info->max_tmds_clock) {
5894 			timing_out->display_color_depth = depth;
5895 			return true;
5896 		}
5897 	} while (--depth > COLOR_DEPTH_666);
5898 	return false;
5899 }
5900 
5901 static void fill_stream_properties_from_drm_display_mode(
5902 	struct dc_stream_state *stream,
5903 	const struct drm_display_mode *mode_in,
5904 	const struct drm_connector *connector,
5905 	const struct drm_connector_state *connector_state,
5906 	const struct dc_stream_state *old_stream,
5907 	int requested_bpc)
5908 {
5909 	struct dc_crtc_timing *timing_out = &stream->timing;
5910 	const struct drm_display_info *info = &connector->display_info;
5911 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5912 	struct hdmi_vendor_infoframe hv_frame;
5913 	struct hdmi_avi_infoframe avi_frame;
5914 
5915 	memset(&hv_frame, 0, sizeof(hv_frame));
5916 	memset(&avi_frame, 0, sizeof(avi_frame));
5917 
5918 	timing_out->h_border_left = 0;
5919 	timing_out->h_border_right = 0;
5920 	timing_out->v_border_top = 0;
5921 	timing_out->v_border_bottom = 0;
5922 	/* TODO: un-hardcode */
5923 	if (drm_mode_is_420_only(info, mode_in)
5924 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5925 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5926 	else if (drm_mode_is_420_also(info, mode_in)
5927 			&& aconnector->force_yuv420_output)
5928 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5929 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5930 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5931 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5932 	else
5933 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5934 
5935 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5936 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5937 		connector,
5938 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5939 		requested_bpc);
5940 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5941 	timing_out->hdmi_vic = 0;
5942 
5943 	if (old_stream) {
5944 		timing_out->vic = old_stream->timing.vic;
5945 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5946 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5947 	} else {
5948 		timing_out->vic = drm_match_cea_mode(mode_in);
5949 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5950 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5951 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5952 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5953 	}
5954 
5955 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5956 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5957 		timing_out->vic = avi_frame.video_code;
5958 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5959 		timing_out->hdmi_vic = hv_frame.vic;
5960 	}
5961 
5962 	if (is_freesync_video_mode(mode_in, aconnector)) {
5963 		timing_out->h_addressable = mode_in->hdisplay;
5964 		timing_out->h_total = mode_in->htotal;
5965 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5966 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5967 		timing_out->v_total = mode_in->vtotal;
5968 		timing_out->v_addressable = mode_in->vdisplay;
5969 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5970 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5971 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5972 	} else {
5973 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5974 		timing_out->h_total = mode_in->crtc_htotal;
5975 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5976 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5977 		timing_out->v_total = mode_in->crtc_vtotal;
5978 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5979 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5980 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5981 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5982 	}
5983 
5984 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5985 
5986 	stream->output_color_space = get_output_color_space(timing_out);
5987 
5988 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5989 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5990 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5991 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5992 		    drm_mode_is_420_also(info, mode_in) &&
5993 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5994 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5995 			adjust_colour_depth_from_display_info(timing_out, info);
5996 		}
5997 	}
5998 }
5999 
6000 static void fill_audio_info(struct audio_info *audio_info,
6001 			    const struct drm_connector *drm_connector,
6002 			    const struct dc_sink *dc_sink)
6003 {
6004 	int i = 0;
6005 	int cea_revision = 0;
6006 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6007 
6008 	audio_info->manufacture_id = edid_caps->manufacturer_id;
6009 	audio_info->product_id = edid_caps->product_id;
6010 
6011 	cea_revision = drm_connector->display_info.cea_rev;
6012 
6013 	strscpy(audio_info->display_name,
6014 		edid_caps->display_name,
6015 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6016 
6017 	if (cea_revision >= 3) {
6018 		audio_info->mode_count = edid_caps->audio_mode_count;
6019 
6020 		for (i = 0; i < audio_info->mode_count; ++i) {
6021 			audio_info->modes[i].format_code =
6022 					(enum audio_format_code)
6023 					(edid_caps->audio_modes[i].format_code);
6024 			audio_info->modes[i].channel_count =
6025 					edid_caps->audio_modes[i].channel_count;
6026 			audio_info->modes[i].sample_rates.all =
6027 					edid_caps->audio_modes[i].sample_rate;
6028 			audio_info->modes[i].sample_size =
6029 					edid_caps->audio_modes[i].sample_size;
6030 		}
6031 	}
6032 
6033 	audio_info->flags.all = edid_caps->speaker_flags;
6034 
6035 	/* TODO: We only check for the progressive mode, check for interlace mode too */
6036 	if (drm_connector->latency_present[0]) {
6037 		audio_info->video_latency = drm_connector->video_latency[0];
6038 		audio_info->audio_latency = drm_connector->audio_latency[0];
6039 	}
6040 
6041 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6042 
6043 }
6044 
6045 static void
6046 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6047 				      struct drm_display_mode *dst_mode)
6048 {
6049 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6050 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6051 	dst_mode->crtc_clock = src_mode->crtc_clock;
6052 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6053 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6054 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6055 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6056 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6057 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6058 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6059 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6060 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6061 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6062 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6063 }
6064 
6065 static void
6066 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6067 					const struct drm_display_mode *native_mode,
6068 					bool scale_enabled)
6069 {
6070 	if (scale_enabled) {
6071 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6072 	} else if (native_mode->clock == drm_mode->clock &&
6073 			native_mode->htotal == drm_mode->htotal &&
6074 			native_mode->vtotal == drm_mode->vtotal) {
6075 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6076 	} else {
6077 		/* no scaling and no amdgpu-inserted mode, nothing to patch */
6078 	}
6079 }
6080 
6081 static struct dc_sink *
6082 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6083 {
6084 	struct dc_sink_init_data sink_init_data = { 0 };
6085 	struct dc_sink *sink = NULL;
6086 	sink_init_data.link = aconnector->dc_link;
6087 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6088 
6089 	sink = dc_sink_create(&sink_init_data);
6090 	if (!sink) {
6091 		DRM_ERROR("Failed to create sink!\n");
6092 		return NULL;
6093 	}
6094 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6095 
6096 	return sink;
6097 }
6098 
6099 static void set_multisync_trigger_params(
6100 		struct dc_stream_state *stream)
6101 {
6102 	struct dc_stream_state *master = NULL;
6103 
6104 	if (stream->triggered_crtc_reset.enabled) {
6105 		master = stream->triggered_crtc_reset.event_source;
6106 		stream->triggered_crtc_reset.event =
6107 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6108 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6109 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6110 	}
6111 }
6112 
6113 static void set_master_stream(struct dc_stream_state *stream_set[],
6114 			      int stream_count)
6115 {
6116 	int j, highest_rfr = 0, master_stream = 0;
6117 
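	/* The stream with the highest refresh rate becomes the multisync master. */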
6118 	for (j = 0;  j < stream_count; j++) {
6119 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6120 			int refresh_rate = 0;
6121 
6122 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6123 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6124 			if (refresh_rate > highest_rfr) {
6125 				highest_rfr = refresh_rate;
6126 				master_stream = j;
6127 			}
6128 		}
6129 	}
6130 	for (j = 0;  j < stream_count; j++) {
6131 		if (stream_set[j])
6132 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6133 	}
6134 }
6135 
6136 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6137 {
6138 	int i = 0;
6139 	struct dc_stream_state *stream;
6140 
6141 	if (context->stream_count < 2)
6142 		return;
6143 	for (i = 0; i < context->stream_count ; i++) {
6144 		if (!context->streams[i])
6145 			continue;
6146 		/*
6147 		 * TODO: add a function to read AMD VSDB bits and set
6148 		 * crtc_sync_master.multi_sync_enabled flag
6149 		 * For now it's set to false
6150 		 */
6151 	}
6152 
6153 	set_master_stream(context->streams, context->stream_count);
6154 
6155 	for (i = 0; i < context->stream_count ; i++) {
6156 		stream = context->streams[i];
6157 
6158 		if (!stream)
6159 			continue;
6160 
6161 		set_multisync_trigger_params(stream);
6162 	}
6163 }
6164 
6165 #if defined(CONFIG_DRM_AMD_DC_DCN)
6166 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6167 							struct dc_sink *sink, struct dc_stream_state *stream,
6168 							struct dsc_dec_dpcd_caps *dsc_caps)
6169 {
6170 	stream->timing.flags.DSC = 0;
6171 	dsc_caps->is_dsc_supported = false;
6172 
6173 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6174 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6175 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6176 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6177 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6178 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6179 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6180 				dsc_caps);
6181 	}
6182 }
6183 
6184 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6185 				    struct dc_sink *sink, struct dc_stream_state *stream,
6186 				    struct dsc_dec_dpcd_caps *dsc_caps,
6187 				    uint32_t max_dsc_target_bpp_limit_override)
6188 {
6189 	const struct dc_link_settings *verified_link_cap = NULL;
6190 	uint32_t link_bw_in_kbps;
6191 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6192 	struct dc *dc = sink->ctx->dc;
6193 	struct dc_dsc_bw_range bw_range = {0};
6194 	struct dc_dsc_config dsc_cfg = {0};
6195 
6196 	verified_link_cap = dc_link_get_link_cap(stream->link);
6197 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
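	/* The bpp values below are in units of 1/16th of a bit per pixel. */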
6198 	edp_min_bpp_x16 = 8 * 16;
6199 	edp_max_bpp_x16 = 8 * 16;
6200 
6201 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6202 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6203 
6204 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6205 		edp_min_bpp_x16 = edp_max_bpp_x16;
6206 
6207 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6208 				dc->debug.dsc_min_slice_height_override,
6209 				edp_min_bpp_x16, edp_max_bpp_x16,
6210 				dsc_caps,
6211 				&stream->timing,
6212 				&bw_range)) {
6213 
6214 		if (bw_range.max_kbps < link_bw_in_kbps) {
6215 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6216 					dsc_caps,
6217 					dc->debug.dsc_min_slice_height_override,
6218 					max_dsc_target_bpp_limit_override,
6219 					0,
6220 					&stream->timing,
6221 					&dsc_cfg)) {
6222 				stream->timing.dsc_cfg = dsc_cfg;
6223 				stream->timing.flags.DSC = 1;
6224 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6225 			}
6226 			return;
6227 		}
6228 	}
6229 
6230 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6231 				dsc_caps,
6232 				dc->debug.dsc_min_slice_height_override,
6233 				max_dsc_target_bpp_limit_override,
6234 				link_bw_in_kbps,
6235 				&stream->timing,
6236 				&dsc_cfg)) {
6237 		stream->timing.dsc_cfg = dsc_cfg;
6238 		stream->timing.flags.DSC = 1;
6239 	}
6240 }
6241 
6242 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6243 										struct dc_sink *sink, struct dc_stream_state *stream,
6244 										struct dsc_dec_dpcd_caps *dsc_caps)
6245 {
6246 	struct drm_connector *drm_connector = &aconnector->base;
6247 	uint32_t link_bandwidth_kbps;
6248 	uint32_t max_dsc_target_bpp_limit_override = 0;
6249 	struct dc *dc = sink->ctx->dc;
6250 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6251 	uint32_t dsc_max_supported_bw_in_kbps;
6252 
6253 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6254 							dc_link_get_link_cap(aconnector->dc_link));
6255 
6256 	if (stream->link && stream->link->local_sink)
6257 		max_dsc_target_bpp_limit_override =
6258 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6259 
6260 	/* Set DSC policy according to dsc_clock_en */
6261 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6262 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6263 
6264 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6265 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6266 
6267 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6268 
6269 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6270 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6271 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6272 						dsc_caps,
6273 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6274 						max_dsc_target_bpp_limit_override,
6275 						link_bandwidth_kbps,
6276 						&stream->timing,
6277 						&stream->timing.dsc_cfg)) {
6278 				stream->timing.flags.DSC = 1;
6279 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6280 								 __func__, drm_connector->name);
6281 			}
6282 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6283 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6284 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6285 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6286 
6287 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6288 					max_supported_bw_in_kbps > 0 &&
6289 					dsc_max_supported_bw_in_kbps > 0)
6290 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6291 						dsc_caps,
6292 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6293 						max_dsc_target_bpp_limit_override,
6294 						dsc_max_supported_bw_in_kbps,
6295 						&stream->timing,
6296 						&stream->timing.dsc_cfg)) {
6297 					stream->timing.flags.DSC = 1;
6298 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6299 									 __func__, drm_connector->name);
6300 				}
6301 		}
6302 	}
6303 
6304 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6305 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6306 		stream->timing.flags.DSC = 1;
6307 
6308 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6309 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6310 
6311 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6312 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6313 
6314 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6315 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6316 }
6317 #endif /* CONFIG_DRM_AMD_DC_DCN */
6318 
6319 /**
6320  * DOC: FreeSync Video
6321  *
6322  * When a userspace application wants to play a video, the content follows a
6323  * standard format definition that usually specifies the FPS for that format.
6324  * The list below illustrates some video formats and their expected FPS,
6325  * respectively:
6326  *
6327  * - TV/NTSC (23.976 FPS)
6328  * - Cinema (24 FPS)
6329  * - TV/PAL (25 FPS)
6330  * - TV/NTSC (29.97 FPS)
6331  * - TV/NTSC (30 FPS)
6332  * - Cinema HFR (48 FPS)
6333  * - TV/PAL (50 FPS)
6334  * - Commonly used (60 FPS)
6335  * - Multiples of 24 (48,72,96,120 FPS)
6336  *
6337  * The list of standard video formats is not huge and can be added to the
6338  * connector's mode list beforehand. With that, userspace can leverage
6339  * FreeSync to extend the front porch in order to attain the target refresh
6340  * rate. Such a switch happens seamlessly, without screen blanking or
6341  * reprogramming of the output in any other way. If userspace requests a
6342  * modeset that is compatible with a FreeSync mode and only differs in the
6343  * refresh rate, DC will skip the full update and avoid any blink during the
6344  * transition. For example, a video player can change the mode from 60Hz to
6345  * 30Hz for playing TV/NTSC content when it goes full screen, without
6346  * causing any display blink. The same concept applies to any other mode
6347  * setting change.
6348  */
6349 static struct drm_display_mode *
6350 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6351 			  bool use_probed_modes)
6352 {
6353 	struct drm_display_mode *m, *m_pref = NULL;
6354 	u16 current_refresh, highest_refresh;
6355 	struct list_head *list_head = use_probed_modes ?
6356 						    &aconnector->base.probed_modes :
6357 						    &aconnector->base.modes;
6358 
6359 	if (aconnector->freesync_vid_base.clock != 0)
6360 		return &aconnector->freesync_vid_base;
6361 
6362 	/* Find the preferred mode */
6363 	list_for_each_entry(m, list_head, head) {
6364 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6365 			m_pref = m;
6366 			break;
6367 		}
6368 	}
6369 
6370 	if (!m_pref) {
6371 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6372 		m_pref = list_first_entry_or_null(
6373 			&aconnector->base.modes, struct drm_display_mode, head);
6374 		if (!m_pref) {
6375 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6376 			return NULL;
6377 		}
6378 	}
6379 
6380 	highest_refresh = drm_mode_vrefresh(m_pref);
6381 
6382 	/*
6383 	 * Find the mode with highest refresh rate with same resolution.
6384 	 * For some monitors, preferred mode is not the mode with highest
6385 	 * supported refresh rate.
6386 	 */
6387 	list_for_each_entry(m, list_head, head) {
6388 		current_refresh  = drm_mode_vrefresh(m);
6389 
6390 		if (m->hdisplay == m_pref->hdisplay &&
6391 		    m->vdisplay == m_pref->vdisplay &&
6392 		    highest_refresh < current_refresh) {
6393 			highest_refresh = current_refresh;
6394 			m_pref = m;
6395 		}
6396 	}
6397 
6398 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6399 	return m_pref;
6400 }
6401 
6402 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6403 				   struct amdgpu_dm_connector *aconnector)
6404 {
6405 	struct drm_display_mode *high_mode;
6406 	int timing_diff;
6407 
6408 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6409 	if (!high_mode || !mode)
6410 		return false;
6411 
6412 	timing_diff = high_mode->vtotal - mode->vtotal;
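	/*
	 * A freesync video mode may only differ from the base mode in its
	 * vertical front porch, so any vtotal delta must show up identically
	 * in vsync_start and vsync_end.
	 */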
6413 
6414 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6415 	    high_mode->hdisplay != mode->hdisplay ||
6416 	    high_mode->vdisplay != mode->vdisplay ||
6417 	    high_mode->hsync_start != mode->hsync_start ||
6418 	    high_mode->hsync_end != mode->hsync_end ||
6419 	    high_mode->htotal != mode->htotal ||
6420 	    high_mode->hskew != mode->hskew ||
6421 	    high_mode->vscan != mode->vscan ||
6422 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6423 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6424 		return false;
6425 	else
6426 		return true;
6427 }
6428 
6429 static struct dc_stream_state *
6430 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6431 		       const struct drm_display_mode *drm_mode,
6432 		       const struct dm_connector_state *dm_state,
6433 		       const struct dc_stream_state *old_stream,
6434 		       int requested_bpc)
6435 {
6436 	struct drm_display_mode *preferred_mode = NULL;
6437 	struct drm_connector *drm_connector;
6438 	const struct drm_connector_state *con_state =
6439 		dm_state ? &dm_state->base : NULL;
6440 	struct dc_stream_state *stream = NULL;
6441 	struct drm_display_mode mode = *drm_mode;
6442 	struct drm_display_mode saved_mode;
6443 	struct drm_display_mode *freesync_mode = NULL;
6444 	bool native_mode_found = false;
6445 	bool recalculate_timing = false;
6446 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6447 	int mode_refresh;
6448 	int preferred_refresh = 0;
6449 #if defined(CONFIG_DRM_AMD_DC_DCN)
6450 	struct dsc_dec_dpcd_caps dsc_caps;
6451 #endif
6452 	struct dc_sink *sink = NULL;
6453 
6454 	memset(&saved_mode, 0, sizeof(saved_mode));
6455 
6456 	if (aconnector == NULL) {
6457 		DRM_ERROR("aconnector is NULL!\n");
6458 		return stream;
6459 	}
6460 
6461 	drm_connector = &aconnector->base;
6462 
6463 	if (!aconnector->dc_sink) {
6464 		sink = create_fake_sink(aconnector);
6465 		if (!sink)
6466 			return stream;
6467 	} else {
6468 		sink = aconnector->dc_sink;
6469 		dc_sink_retain(sink);
6470 	}
6471 
6472 	stream = dc_create_stream_for_sink(sink);
6473 
6474 	if (stream == NULL) {
6475 		DRM_ERROR("Failed to create stream for sink!\n");
6476 		goto finish;
6477 	}
6478 
6479 	stream->dm_stream_context = aconnector;
6480 
6481 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6482 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6483 
6484 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6485 		/* Search for preferred mode */
6486 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6487 			native_mode_found = true;
6488 			break;
6489 		}
6490 	}
6491 	if (!native_mode_found)
6492 		preferred_mode = list_first_entry_or_null(
6493 				&aconnector->base.modes,
6494 				struct drm_display_mode,
6495 				head);
6496 
6497 	mode_refresh = drm_mode_vrefresh(&mode);
6498 
6499 	if (preferred_mode == NULL) {
6500 		/*
6501 		 * This may not be an error, the use case is when we have no
6502 		 * usermode calls to reset and set mode upon hotplug. In this
6503 		 * case, we call set mode ourselves to restore the previous mode
6504 		 * and the mode list may not be filled in yet.
6505 		 */
6506 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6507 	} else {
6508 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6509 		if (recalculate_timing) {
6510 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6511 			drm_mode_copy(&saved_mode, &mode);
6512 			drm_mode_copy(&mode, freesync_mode);
6513 		} else {
6514 			decide_crtc_timing_for_drm_display_mode(
6515 				&mode, preferred_mode, scale);
6516 
6517 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6518 		}
6519 	}
6520 
6521 	if (recalculate_timing)
6522 		drm_mode_set_crtcinfo(&saved_mode, 0);
6523 	else if (!dm_state)
6524 		drm_mode_set_crtcinfo(&mode, 0);
6525 
6526 	/*
6527 	 * If scaling is enabled and the refresh rate didn't change,
6528 	 * we copy the vic and polarities of the old timings.
6529 	 */
6530 	if (!scale || mode_refresh != preferred_refresh)
6531 		fill_stream_properties_from_drm_display_mode(
6532 			stream, &mode, &aconnector->base, con_state, NULL,
6533 			requested_bpc);
6534 	else
6535 		fill_stream_properties_from_drm_display_mode(
6536 			stream, &mode, &aconnector->base, con_state, old_stream,
6537 			requested_bpc);
6538 
6539 #if defined(CONFIG_DRM_AMD_DC_DCN)
6540 	/* SST DSC determination policy */
6541 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6542 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6543 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6544 #endif
6545 
6546 	update_stream_scaling_settings(&mode, dm_state, stream);
6547 
6548 	fill_audio_info(
6549 		&stream->audio_info,
6550 		drm_connector,
6551 		sink);
6552 
6553 	update_stream_signal(stream, sink);
6554 
6555 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6556 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6557 
6558 	if (stream->link->psr_settings.psr_feature_enabled) {
6559 		/*
6560 		 * Decide whether the stream supports VSC SDP colorimetry
6561 		 * before building the VSC info packet.
6562 		 */
6563 		stream->use_vsc_sdp_for_colorimetry = false;
6564 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6565 			stream->use_vsc_sdp_for_colorimetry =
6566 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6567 		} else {
6568 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6569 				stream->use_vsc_sdp_for_colorimetry = true;
6570 		}
6571 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6572 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6573 
6574 	}
6575 finish:
6576 	dc_sink_release(sink);
6577 
6578 	return stream;
6579 }
6580 
6581 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6582 {
6583 	drm_crtc_cleanup(crtc);
6584 	kfree(crtc);
6585 }
6586 
6587 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6588 				  struct drm_crtc_state *state)
6589 {
6590 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6591 
6592 	/* TODO: Destroy dc_stream objects when the stream object is flattened */
6593 	if (cur->stream)
6594 		dc_stream_release(cur->stream);
6595 
6596 
6597 	__drm_atomic_helper_crtc_destroy_state(state);
6598 
6599 
6600 	kfree(state);
6601 }
6602 
6603 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6604 {
6605 	struct dm_crtc_state *state;
6606 
6607 	if (crtc->state)
6608 		dm_crtc_destroy_state(crtc, crtc->state);
6609 
6610 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6611 	if (WARN_ON(!state))
6612 		return;
6613 
6614 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6615 }
6616 
6617 static struct drm_crtc_state *
6618 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6619 {
6620 	struct dm_crtc_state *state, *cur;
6621 
6622 	if (WARN_ON(!crtc->state))
6623 		return NULL;
6624 
6625 	cur = to_dm_crtc_state(crtc->state);
6626 
6627 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6628 	if (!state)
6629 		return NULL;
6630 
6631 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6632 
6633 	if (cur->stream) {
6634 		state->stream = cur->stream;
6635 		dc_stream_retain(state->stream);
6636 	}
6637 
6638 	state->active_planes = cur->active_planes;
6639 	state->vrr_infopacket = cur->vrr_infopacket;
6640 	state->abm_level = cur->abm_level;
6641 	state->vrr_supported = cur->vrr_supported;
6642 	state->freesync_config = cur->freesync_config;
6643 	state->cm_has_degamma = cur->cm_has_degamma;
6644 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6645 	state->force_dpms_off = cur->force_dpms_off;
6646 	/* TODO: Duplicate dc_stream after the stream object is flattened */
6647 
6648 	return &state->base;
6649 }
6650 
6651 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6652 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6653 {
6654 	crtc_debugfs_init(crtc);
6655 
6656 	return 0;
6657 }
6658 #endif
6659 
6660 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6661 {
6662 	enum dc_irq_source irq_source;
6663 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6664 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6665 	int rc;
6666 
6667 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6668 
6669 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6670 
6671 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6672 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6673 	return rc;
6674 }
6675 
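/*
 * Enable/disable the vblank interrupt for a CRTC via DC.
 *
 * The vupdate interrupt is only required while VRR is active, so it is
 * slaved to the vblank state here: enabling vblank turns vupdate on only
 * in VRR mode, and disabling vblank always turns vupdate off.  Unless a
 * GPU reset is in progress, the remaining handling (PSR and display
 * idle-optimization bookkeeping) is deferred to the vblank control
 * workqueue; the work item is allocated with GFP_ATOMIC because this
 * path can be reached from atomic context, and the dc_stream is retained
 * so the worker holds a valid reference.
 */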
6676 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6677 {
6678 	enum dc_irq_source irq_source;
6679 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6680 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6681 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6682 	struct amdgpu_display_manager *dm = &adev->dm;
6683 	struct vblank_control_work *work;
6684 	int rc = 0;
6685 
6686 	if (enable) {
6687 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6688 		if (amdgpu_dm_vrr_active(acrtc_state))
6689 			rc = dm_set_vupdate_irq(crtc, true);
6690 	} else {
6691 		/* vblank irq off -> vupdate irq off */
6692 		rc = dm_set_vupdate_irq(crtc, false);
6693 	}
6694 
6695 	if (rc)
6696 		return rc;
6697 
6698 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6699 
6700 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6701 		return -EBUSY;
6702 
6703 	if (amdgpu_in_reset(adev))
6704 		return 0;
6705 
6706 	if (dm->vblank_control_workqueue) {
6707 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6708 		if (!work)
6709 			return -ENOMEM;
6710 
6711 		INIT_WORK(&work->work, vblank_control_worker);
6712 		work->dm = dm;
6713 		work->acrtc = acrtc;
6714 		work->enable = enable;
6715 
6716 		if (acrtc_state->stream) {
6717 			dc_stream_retain(acrtc_state->stream);
6718 			work->stream = acrtc_state->stream;
6719 		}
6720 
6721 		queue_work(dm->vblank_control_workqueue, &work->work);
6722 	}
6723 
6724 	return 0;
6725 }
6726 
6727 static int dm_enable_vblank(struct drm_crtc *crtc)
6728 {
6729 	return dm_set_vblank(crtc, true);
6730 }
6731 
6732 static void dm_disable_vblank(struct drm_crtc *crtc)
6733 {
6734 	dm_set_vblank(crtc, false);
6735 }
6736 
6737 /* Implement only the options currently available for the driver */
6738 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6739 	.reset = dm_crtc_reset_state,
6740 	.destroy = amdgpu_dm_crtc_destroy,
6741 	.set_config = drm_atomic_helper_set_config,
6742 	.page_flip = drm_atomic_helper_page_flip,
6743 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6744 	.atomic_destroy_state = dm_crtc_destroy_state,
6745 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6746 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6747 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6748 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6749 	.enable_vblank = dm_enable_vblank,
6750 	.disable_vblank = dm_disable_vblank,
6751 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6752 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6753 	.late_register = amdgpu_dm_crtc_late_register,
6754 #endif
6755 };
6756 
6757 static enum drm_connector_status
6758 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6759 {
6760 	bool connected;
6761 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6762 
6763 	/*
6764 	 * Notes:
6765 	 * 1. This interface is NOT called in context of HPD irq.
6766 	 * 2. This interface *is called* in context of user-mode ioctl, which
6767 	 * makes it a bad place for *any* MST-related activity.
6768 	 */
6769 
6770 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6771 	    !aconnector->fake_enable)
6772 		connected = (aconnector->dc_sink != NULL);
6773 	else
6774 		connected = (aconnector->base.force == DRM_FORCE_ON);
6775 
6776 	update_subconnector_property(aconnector);
6777 
6778 	return (connected ? connector_status_connected :
6779 			connector_status_disconnected);
6780 }
6781 
6782 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6783 					    struct drm_connector_state *connector_state,
6784 					    struct drm_property *property,
6785 					    uint64_t val)
6786 {
6787 	struct drm_device *dev = connector->dev;
6788 	struct amdgpu_device *adev = drm_to_adev(dev);
6789 	struct dm_connector_state *dm_old_state =
6790 		to_dm_connector_state(connector->state);
6791 	struct dm_connector_state *dm_new_state =
6792 		to_dm_connector_state(connector_state);
6793 
6794 	int ret = -EINVAL;
6795 
6796 	if (property == dev->mode_config.scaling_mode_property) {
6797 		enum amdgpu_rmx_type rmx_type;
6798 
6799 		switch (val) {
6800 		case DRM_MODE_SCALE_CENTER:
6801 			rmx_type = RMX_CENTER;
6802 			break;
6803 		case DRM_MODE_SCALE_ASPECT:
6804 			rmx_type = RMX_ASPECT;
6805 			break;
6806 		case DRM_MODE_SCALE_FULLSCREEN:
6807 			rmx_type = RMX_FULL;
6808 			break;
6809 		case DRM_MODE_SCALE_NONE:
6810 		default:
6811 			rmx_type = RMX_OFF;
6812 			break;
6813 		}
6814 
6815 		if (dm_old_state->scaling == rmx_type)
6816 			return 0;
6817 
6818 		dm_new_state->scaling = rmx_type;
6819 		ret = 0;
6820 	} else if (property == adev->mode_info.underscan_hborder_property) {
6821 		dm_new_state->underscan_hborder = val;
6822 		ret = 0;
6823 	} else if (property == adev->mode_info.underscan_vborder_property) {
6824 		dm_new_state->underscan_vborder = val;
6825 		ret = 0;
6826 	} else if (property == adev->mode_info.underscan_property) {
6827 		dm_new_state->underscan_enable = val;
6828 		ret = 0;
6829 	} else if (property == adev->mode_info.abm_level_property) {
6830 		dm_new_state->abm_level = val;
6831 		ret = 0;
6832 	}
6833 
6834 	return ret;
6835 }
6836 
6837 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6838 					    const struct drm_connector_state *state,
6839 					    struct drm_property *property,
6840 					    uint64_t *val)
6841 {
6842 	struct drm_device *dev = connector->dev;
6843 	struct amdgpu_device *adev = drm_to_adev(dev);
6844 	struct dm_connector_state *dm_state =
6845 		to_dm_connector_state(state);
6846 	int ret = -EINVAL;
6847 
6848 	if (property == dev->mode_config.scaling_mode_property) {
6849 		switch (dm_state->scaling) {
6850 		case RMX_CENTER:
6851 			*val = DRM_MODE_SCALE_CENTER;
6852 			break;
6853 		case RMX_ASPECT:
6854 			*val = DRM_MODE_SCALE_ASPECT;
6855 			break;
6856 		case RMX_FULL:
6857 			*val = DRM_MODE_SCALE_FULLSCREEN;
6858 			break;
6859 		case RMX_OFF:
6860 		default:
6861 			*val = DRM_MODE_SCALE_NONE;
6862 			break;
6863 		}
6864 		ret = 0;
6865 	} else if (property == adev->mode_info.underscan_hborder_property) {
6866 		*val = dm_state->underscan_hborder;
6867 		ret = 0;
6868 	} else if (property == adev->mode_info.underscan_vborder_property) {
6869 		*val = dm_state->underscan_vborder;
6870 		ret = 0;
6871 	} else if (property == adev->mode_info.underscan_property) {
6872 		*val = dm_state->underscan_enable;
6873 		ret = 0;
6874 	} else if (property == adev->mode_info.abm_level_property) {
6875 		*val = dm_state->abm_level;
6876 		ret = 0;
6877 	}
6878 
6879 	return ret;
6880 }
6881 
6882 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6883 {
6884 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6885 
6886 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6887 }
6888 
6889 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6890 {
6891 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6892 	const struct dc_link *link = aconnector->dc_link;
6893 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6894 	struct amdgpu_display_manager *dm = &adev->dm;
6895 	int i;
6896 
6897 	/*
6898 	 * Call only if mst_mgr was initialized before, since it's not done
6899 	 * for all connector types.
6900 	 */
6901 	if (aconnector->mst_mgr.dev)
6902 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6903 
6904 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6905 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6906 	for (i = 0; i < dm->num_of_edps; i++) {
6907 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6908 			backlight_device_unregister(dm->backlight_dev[i]);
6909 			dm->backlight_dev[i] = NULL;
6910 		}
6911 	}
6912 #endif
6913 
6914 	if (aconnector->dc_em_sink)
6915 		dc_sink_release(aconnector->dc_em_sink);
6916 	aconnector->dc_em_sink = NULL;
6917 	if (aconnector->dc_sink)
6918 		dc_sink_release(aconnector->dc_sink);
6919 	aconnector->dc_sink = NULL;
6920 
6921 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6922 	drm_connector_unregister(connector);
6923 	drm_connector_cleanup(connector);
6924 	if (aconnector->i2c) {
6925 		i2c_del_adapter(&aconnector->i2c->base);
6926 		kfree(aconnector->i2c);
6927 	}
6928 	kfree(aconnector->dm_dp_aux.aux.name);
6929 
6930 	kfree(connector);
6931 }
6932 
6933 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6934 {
6935 	struct dm_connector_state *state =
6936 		to_dm_connector_state(connector->state);
6937 
6938 	if (connector->state)
6939 		__drm_atomic_helper_connector_destroy_state(connector->state);
6940 
6941 	kfree(state);
6942 
6943 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6944 
6945 	if (state) {
6946 		state->scaling = RMX_OFF;
6947 		state->underscan_enable = false;
6948 		state->underscan_hborder = 0;
6949 		state->underscan_vborder = 0;
6950 		state->base.max_requested_bpc = 8;
6951 		state->vcpi_slots = 0;
6952 		state->pbn = 0;
6953 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6954 			state->abm_level = amdgpu_dm_abm_level;
6955 
6956 		__drm_atomic_helper_connector_reset(connector, &state->base);
6957 	}
6958 }
6959 
6960 struct drm_connector_state *
6961 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6962 {
6963 	struct dm_connector_state *state =
6964 		to_dm_connector_state(connector->state);
6965 
6966 	struct dm_connector_state *new_state =
6967 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6968 
6969 	if (!new_state)
6970 		return NULL;
6971 
6972 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6973 
6974 	new_state->freesync_capable = state->freesync_capable;
6975 	new_state->abm_level = state->abm_level;
6976 	new_state->scaling = state->scaling;
6977 	new_state->underscan_enable = state->underscan_enable;
6978 	new_state->underscan_hborder = state->underscan_hborder;
6979 	new_state->underscan_vborder = state->underscan_vborder;
6980 	new_state->vcpi_slots = state->vcpi_slots;
6981 	new_state->pbn = state->pbn;
6982 	return &new_state->base;
6983 }
6984 
6985 static int
6986 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6987 {
6988 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6989 		to_amdgpu_dm_connector(connector);
6990 	int r;
6991 
6992 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6993 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6994 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6995 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6996 		if (r)
6997 			return r;
6998 	}
6999 
7000 #if defined(CONFIG_DEBUG_FS)
7001 	connector_debugfs_init(amdgpu_dm_connector);
7002 #endif
7003 
7004 	return 0;
7005 }
7006 
7007 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7008 	.reset = amdgpu_dm_connector_funcs_reset,
7009 	.detect = amdgpu_dm_connector_detect,
7010 	.fill_modes = drm_helper_probe_single_connector_modes,
7011 	.destroy = amdgpu_dm_connector_destroy,
7012 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7013 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7014 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7015 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7016 	.late_register = amdgpu_dm_connector_late_register,
7017 	.early_unregister = amdgpu_dm_connector_unregister
7018 };
7019 
7020 static int get_modes(struct drm_connector *connector)
7021 {
7022 	return amdgpu_dm_connector_get_modes(connector);
7023 }
7024 
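/*
 * Create an emulated (virtual) sink from the EDID blob attached to the
 * connector.  This is used when a connector is forced on (e.g. via the
 * EDID override / force interfaces) so that a stream can be built and
 * modes exposed even though no physical sink responded; without an EDID
 * blob the connector is forced off instead.
 */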
7025 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7026 {
7027 	struct dc_sink_init_data init_params = {
7028 			.link = aconnector->dc_link,
7029 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7030 	};
7031 	struct edid *edid;
7032 
7033 	if (!aconnector->base.edid_blob_ptr) {
7034 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7035 				aconnector->base.name);
7036 
7037 		aconnector->base.force = DRM_FORCE_OFF;
7038 		aconnector->base.override_edid = false;
7039 		return;
7040 	}
7041 
7042 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7043 
7044 	aconnector->edid = edid;
7045 
7046 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7047 		aconnector->dc_link,
7048 		(uint8_t *)edid,
7049 		(edid->extensions + 1) * EDID_LENGTH,
7050 		&init_params);
7051 
7052 	if (aconnector->base.force == DRM_FORCE_ON) {
7053 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7054 		aconnector->dc_link->local_sink :
7055 		aconnector->dc_em_sink;
7056 		dc_sink_retain(aconnector->dc_sink);
7057 	}
7058 }
7059 
7060 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7061 {
7062 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7063 
7064 	/*
7065 	 * In case of headless boot with force on for a DP managed connector,
7066 	 * those settings have to be != 0 to get an initial modeset.
7067 	 */
7068 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7069 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7070 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7071 	}
7072 
7073 
7074 	aconnector->base.override_edid = true;
7075 	create_eml_sink(aconnector);
7076 }
7077 
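/*
 * Create a dc_stream_state for the sink and validate it against DC.
 *
 * If DC rejects the stream at the requested colour depth, the requested
 * bpc is lowered in steps of 2 (down to a minimum of 6) and the stream
 * is rebuilt and revalidated.  If validation still fails at the encoder
 * (DC_FAIL_ENC_VALIDATE), one more attempt is made with YCbCr 4:2:0
 * output forced, which reduces the required link bandwidth.
 */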
7078 struct dc_stream_state *
7079 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7080 				const struct drm_display_mode *drm_mode,
7081 				const struct dm_connector_state *dm_state,
7082 				const struct dc_stream_state *old_stream)
7083 {
7084 	struct drm_connector *connector = &aconnector->base;
7085 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7086 	struct dc_stream_state *stream;
7087 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7088 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7089 	enum dc_status dc_result = DC_OK;
7090 
7091 	do {
7092 		stream = create_stream_for_sink(aconnector, drm_mode,
7093 						dm_state, old_stream,
7094 						requested_bpc);
7095 		if (stream == NULL) {
7096 			DRM_ERROR("Failed to create stream for sink!\n");
7097 			break;
7098 		}
7099 
7100 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7101 
7102 		if (dc_result != DC_OK) {
7103 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7104 				      drm_mode->hdisplay,
7105 				      drm_mode->vdisplay,
7106 				      drm_mode->clock,
7107 				      dc_result,
7108 				      dc_status_to_str(dc_result));
7109 
7110 			dc_stream_release(stream);
7111 			stream = NULL;
7112 			requested_bpc -= 2; /* lower bpc to retry validation */
7113 		}
7114 
7115 	} while (stream == NULL && requested_bpc >= 6);
7116 
7117 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7118 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7119 
7120 		aconnector->force_yuv420_output = true;
7121 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7122 						dm_state, old_stream);
7123 		aconnector->force_yuv420_output = false;
7124 	}
7125 
7126 	return stream;
7127 }
7128 
7129 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7130 				   struct drm_display_mode *mode)
7131 {
7132 	int result = MODE_ERROR;
7133 	struct dc_sink *dc_sink;
7134 	/* TODO: Unhardcode stream count */
7135 	struct dc_stream_state *stream;
7136 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7137 
7138 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7139 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7140 		return result;
7141 
7142 	/*
7143 	 * Only run this the first time mode_valid is called, to initialize
7144 	 * EDID mgmt.
7145 	 */
7146 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7147 		!aconnector->dc_em_sink)
7148 		handle_edid_mgmt(aconnector);
7149 
7150 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7151 
7152 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7153 				aconnector->base.force != DRM_FORCE_ON) {
7154 		DRM_ERROR("dc_sink is NULL!\n");
7155 		goto fail;
7156 	}
7157 
7158 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7159 	if (stream) {
7160 		dc_stream_release(stream);
7161 		result = MODE_OK;
7162 	}
7163 
7164 fail:
7165 	/* TODO: error handling */
7166 	return result;
7167 }
7168 
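/*
 * Pack the connector's HDR static metadata into a DC info packet.
 *
 * The metadata is first packed as an HDMI Dynamic Range and Mastering
 * (DRM) infoframe: a 4 byte header followed by a fixed 26 byte payload.
 * For HDMI the infoframe is carried as-is (type 0x87), while for DP/eDP
 * the same payload is wrapped in a secondary data packet (SDP) with its
 * own header layout, which is why the header bytes below differ per
 * connector type.
 */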
7169 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7170 				struct dc_info_packet *out)
7171 {
7172 	struct hdmi_drm_infoframe frame;
7173 	unsigned char buf[30]; /* 26 + 4 */
7174 	ssize_t len;
7175 	int ret, i;
7176 
7177 	memset(out, 0, sizeof(*out));
7178 
7179 	if (!state->hdr_output_metadata)
7180 		return 0;
7181 
7182 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7183 	if (ret)
7184 		return ret;
7185 
7186 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7187 	if (len < 0)
7188 		return (int)len;
7189 
7190 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7191 	if (len != 30)
7192 		return -EINVAL;
7193 
7194 	/* Prepare the infopacket for DC. */
7195 	switch (state->connector->connector_type) {
7196 	case DRM_MODE_CONNECTOR_HDMIA:
7197 		out->hb0 = 0x87; /* type */
7198 		out->hb1 = 0x01; /* version */
7199 		out->hb2 = 0x1A; /* length */
7200 		out->sb[0] = buf[3]; /* checksum */
7201 		i = 1;
7202 		break;
7203 
7204 	case DRM_MODE_CONNECTOR_DisplayPort:
7205 	case DRM_MODE_CONNECTOR_eDP:
7206 		out->hb0 = 0x00; /* sdp id, zero */
7207 		out->hb1 = 0x87; /* type */
7208 		out->hb2 = 0x1D; /* payload len - 1 */
7209 		out->hb3 = (0x13 << 2); /* sdp version */
7210 		out->sb[0] = 0x01; /* version */
7211 		out->sb[1] = 0x1A; /* length */
7212 		i = 2;
7213 		break;
7214 
7215 	default:
7216 		return -EINVAL;
7217 	}
7218 
7219 	memcpy(&out->sb[i], &buf[4], 26);
7220 	out->valid = true;
7221 
7222 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7223 		       sizeof(out->sb), false);
7224 
7225 	return 0;
7226 }
7227 
7228 static int
7229 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7230 				 struct drm_atomic_state *state)
7231 {
7232 	struct drm_connector_state *new_con_state =
7233 		drm_atomic_get_new_connector_state(state, conn);
7234 	struct drm_connector_state *old_con_state =
7235 		drm_atomic_get_old_connector_state(state, conn);
7236 	struct drm_crtc *crtc = new_con_state->crtc;
7237 	struct drm_crtc_state *new_crtc_state;
7238 	int ret;
7239 
7240 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7241 
7242 	if (!crtc)
7243 		return 0;
7244 
7245 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7246 		struct dc_info_packet hdr_infopacket;
7247 
7248 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7249 		if (ret)
7250 			return ret;
7251 
7252 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7253 		if (IS_ERR(new_crtc_state))
7254 			return PTR_ERR(new_crtc_state);
7255 
7256 		/*
7257 		 * DC considers the stream backends changed if the
7258 		 * static metadata changes. Forcing the modeset also
7259 		 * gives a simple way for userspace to switch from
7260 		 * 8bpc to 10bpc when setting the metadata to enter
7261 		 * or exit HDR.
7262 		 *
7263 		 * Changing the static metadata after it's been
7264 		 * set is permissible, however. So only force a
7265 		 * modeset if we're entering or exiting HDR.
7266 		 */
7267 		new_crtc_state->mode_changed =
7268 			!old_con_state->hdr_output_metadata ||
7269 			!new_con_state->hdr_output_metadata;
7270 	}
7271 
7272 	return 0;
7273 }
7274 
7275 static const struct drm_connector_helper_funcs
7276 amdgpu_dm_connector_helper_funcs = {
7277 	/*
7278 	 * If hotplugging a second, bigger display in fbcon mode, the bigger
7279 	 * resolution modes will be filtered out by drm_mode_validate_size() and
7280 	 * will be missing after the user starts lightdm. So we need to renew the
7281 	 * modes list in the get_modes callback, not just return the modes count.
7282 	 */
7283 	.get_modes = get_modes,
7284 	.mode_valid = amdgpu_dm_connector_mode_valid,
7285 	.atomic_check = amdgpu_dm_connector_atomic_check,
7286 };
7287 
7288 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7289 {
7290 }
7291 
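/*
 * Count the non-cursor planes on this CRTC that will have a framebuffer
 * attached in the new state.  Planes on the CRTC that are not part of
 * this atomic state are counted as enabled, since they previously passed
 * validation.  The count is cached in the DM CRTC state for use during
 * atomic check and commit.
 */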
7292 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7293 {
7294 	struct drm_atomic_state *state = new_crtc_state->state;
7295 	struct drm_plane *plane;
7296 	int num_active = 0;
7297 
7298 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7299 		struct drm_plane_state *new_plane_state;
7300 
7301 		/* Cursor planes are "fake". */
7302 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7303 			continue;
7304 
7305 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7306 
7307 		if (!new_plane_state) {
7308 			/*
7309 			 * The plane is enabled on the CRTC and hasn't changed
7310 			 * state. This means that it previously passed
7311 			 * validation and is therefore enabled.
7312 			 */
7313 			num_active += 1;
7314 			continue;
7315 		}
7316 
7317 		/* We need a framebuffer to be considered enabled. */
7318 		num_active += (new_plane_state->fb != NULL);
7319 	}
7320 
7321 	return num_active;
7322 }
7323 
7324 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7325 					 struct drm_crtc_state *new_crtc_state)
7326 {
7327 	struct dm_crtc_state *dm_new_crtc_state =
7328 		to_dm_crtc_state(new_crtc_state);
7329 
7330 	dm_new_crtc_state->active_planes = 0;
7331 
7332 	if (!dm_new_crtc_state->stream)
7333 		return;
7334 
7335 	dm_new_crtc_state->active_planes =
7336 		count_crtc_active_planes(new_crtc_state);
7337 }
7338 
7339 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7340 				       struct drm_atomic_state *state)
7341 {
7342 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7343 									  crtc);
7344 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7345 	struct dc *dc = adev->dm.dc;
7346 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7347 	int ret = -EINVAL;
7348 
7349 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7350 
7351 	dm_update_crtc_active_planes(crtc, crtc_state);
7352 
7353 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7354 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7355 		return ret;
7356 	}
7357 
7358 	/*
7359 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7360 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7361 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7362 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7363 	 */
7364 	if (crtc_state->enable &&
7365 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7366 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7367 		return -EINVAL;
7368 	}
7369 
7370 	/* In some use cases, like reset, no stream is attached */
7371 	if (!dm_crtc_state->stream)
7372 		return 0;
7373 
7374 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7375 		return 0;
7376 
7377 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7378 	return ret;
7379 }
7380 
7381 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7382 				      const struct drm_display_mode *mode,
7383 				      struct drm_display_mode *adjusted_mode)
7384 {
7385 	return true;
7386 }
7387 
7388 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7389 	.disable = dm_crtc_helper_disable,
7390 	.atomic_check = dm_crtc_helper_atomic_check,
7391 	.mode_fixup = dm_crtc_helper_mode_fixup,
7392 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7393 };
7394 
7395 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7396 {
7397 
7398 }
7399 
7400 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7401 {
7402 	switch (display_color_depth) {
7403 		case COLOR_DEPTH_666:
7404 			return 6;
7405 		case COLOR_DEPTH_888:
7406 			return 8;
7407 		case COLOR_DEPTH_101010:
7408 			return 10;
7409 		case COLOR_DEPTH_121212:
7410 			return 12;
7411 		case COLOR_DEPTH_141414:
7412 			return 14;
7413 		case COLOR_DEPTH_161616:
7414 			return 16;
7415 		default:
7416 			break;
7417 		}
7418 	return 0;
7419 }
7420 
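/*
 * For DP MST connectors, compute the bandwidth the new mode needs in PBN
 * units (from the adjusted pixel clock and the effective bits per pixel)
 * and reserve the corresponding number of VCPI time slots in the MST
 * topology's atomic state when the mode or connectors changed.  Non-MST
 * connectors (no port) are left untouched.
 */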
7421 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7422 					  struct drm_crtc_state *crtc_state,
7423 					  struct drm_connector_state *conn_state)
7424 {
7425 	struct drm_atomic_state *state = crtc_state->state;
7426 	struct drm_connector *connector = conn_state->connector;
7427 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7428 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7429 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7430 	struct drm_dp_mst_topology_mgr *mst_mgr;
7431 	struct drm_dp_mst_port *mst_port;
7432 	enum dc_color_depth color_depth;
7433 	int clock, bpp = 0;
7434 	bool is_y420 = false;
7435 
7436 	if (!aconnector->port || !aconnector->dc_sink)
7437 		return 0;
7438 
7439 	mst_port = aconnector->port;
7440 	mst_mgr = &aconnector->mst_port->mst_mgr;
7441 
7442 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7443 		return 0;
7444 
7445 	if (!state->duplicated) {
7446 		int max_bpc = conn_state->max_requested_bpc;
7447 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7448 				aconnector->force_yuv420_output;
7449 		color_depth = convert_color_depth_from_display_info(connector,
7450 								    is_y420,
7451 								    max_bpc);
7452 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7453 		clock = adjusted_mode->clock;
7454 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7455 	}
7456 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7457 									   mst_mgr,
7458 									   mst_port,
7459 									   dm_new_connector_state->pbn,
7460 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7461 	if (dm_new_connector_state->vcpi_slots < 0) {
7462 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7463 		return dm_new_connector_state->vcpi_slots;
7464 	}
7465 	return 0;
7466 }
7467 
7468 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7469 	.disable = dm_encoder_helper_disable,
7470 	.atomic_check = dm_encoder_helper_atomic_check
7471 };
7472 
7473 #if defined(CONFIG_DRM_AMD_DC_DCN)
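/*
 * After the MST DSC configuration has been computed, walk the new
 * connector states and copy the per-stream PBN/slot results back into
 * the DM connector state, enabling or disabling DSC on each MST port
 * through the DRM MST atomic helpers.  For streams without DSC the slot
 * count is derived directly from pbn / pbn_div.
 */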
7474 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7475 					    struct dc_state *dc_state,
7476 					    struct dsc_mst_fairness_vars *vars)
7477 {
7478 	struct dc_stream_state *stream = NULL;
7479 	struct drm_connector *connector;
7480 	struct drm_connector_state *new_con_state;
7481 	struct amdgpu_dm_connector *aconnector;
7482 	struct dm_connector_state *dm_conn_state;
7483 	int i, j;
7484 	int vcpi, pbn_div, pbn, slot_num = 0;
7485 
7486 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7487 
7488 		aconnector = to_amdgpu_dm_connector(connector);
7489 
7490 		if (!aconnector->port)
7491 			continue;
7492 
7493 		if (!new_con_state || !new_con_state->crtc)
7494 			continue;
7495 
7496 		dm_conn_state = to_dm_connector_state(new_con_state);
7497 
7498 		for (j = 0; j < dc_state->stream_count; j++) {
7499 			stream = dc_state->streams[j];
7500 			if (!stream)
7501 				continue;
7502 
7503 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7504 				break;
7505 
7506 			stream = NULL;
7507 		}
7508 
7509 		if (!stream)
7510 			continue;
7511 
7512 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7513 		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7514 		for (j = 0; j < dc_state->stream_count; j++) {
7515 			if (vars[j].aconnector == aconnector) {
7516 				pbn = vars[j].pbn;
7517 				break;
7518 			}
7519 		}
7520 
7521 		if (j == dc_state->stream_count)
7522 			continue;
7523 
7524 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7525 
7526 		if (stream->timing.flags.DSC != 1) {
7527 			dm_conn_state->pbn = pbn;
7528 			dm_conn_state->vcpi_slots = slot_num;
7529 
7530 			drm_dp_mst_atomic_enable_dsc(state,
7531 						     aconnector->port,
7532 						     dm_conn_state->pbn,
7533 						     0,
7534 						     false);
7535 			continue;
7536 		}
7537 
7538 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7539 						    aconnector->port,
7540 						    pbn, pbn_div,
7541 						    true);
7542 		if (vcpi < 0)
7543 			return vcpi;
7544 
7545 		dm_conn_state->pbn = pbn;
7546 		dm_conn_state->vcpi_slots = vcpi;
7547 	}
7548 	return 0;
7549 }
7550 #endif
7551 
7552 static void dm_drm_plane_reset(struct drm_plane *plane)
7553 {
7554 	struct dm_plane_state *amdgpu_state = NULL;
7555 
7556 	if (plane->state)
7557 		plane->funcs->atomic_destroy_state(plane, plane->state);
7558 
7559 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7560 	WARN_ON(amdgpu_state == NULL);
7561 
7562 	if (amdgpu_state)
7563 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7564 }
7565 
7566 static struct drm_plane_state *
7567 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7568 {
7569 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7570 
7571 	old_dm_plane_state = to_dm_plane_state(plane->state);
7572 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7573 	if (!dm_plane_state)
7574 		return NULL;
7575 
7576 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7577 
7578 	if (old_dm_plane_state->dc_state) {
7579 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7580 		dc_plane_state_retain(dm_plane_state->dc_state);
7581 	}
7582 
7583 	return &dm_plane_state->base;
7584 }
7585 
7586 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7587 				struct drm_plane_state *state)
7588 {
7589 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7590 
7591 	if (dm_plane_state->dc_state)
7592 		dc_plane_state_release(dm_plane_state->dc_state);
7593 
7594 	drm_atomic_helper_plane_destroy_state(plane, state);
7595 }
7596 
7597 static const struct drm_plane_funcs dm_plane_funcs = {
7598 	.update_plane	= drm_atomic_helper_update_plane,
7599 	.disable_plane	= drm_atomic_helper_disable_plane,
7600 	.destroy	= drm_primary_helper_destroy,
7601 	.reset = dm_drm_plane_reset,
7602 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7603 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7604 	.format_mod_supported = dm_plane_format_mod_supported,
7605 };
7606 
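/*
 * prepare_fb: pin the framebuffer's BO so it cannot be moved while it is
 * being scanned out.  Cursor planes are pinned to VRAM, other planes to
 * whichever domains the display hardware supports; a fence slot is
 * reserved and the BO is bound into GART before the GPU address is
 * recorded in the amdgpu_framebuffer.  For freshly created planes the DC
 * buffer attributes (tiling, DCC, addresses) are filled in here as well,
 * since that information is not available at atomic-check time.
 */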
7607 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7608 				      struct drm_plane_state *new_state)
7609 {
7610 	struct amdgpu_framebuffer *afb;
7611 	struct drm_gem_object *obj;
7612 	struct amdgpu_device *adev;
7613 	struct amdgpu_bo *rbo;
7614 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7615 	uint32_t domain;
7616 	int r;
7617 
7618 	if (!new_state->fb) {
7619 		DRM_DEBUG_KMS("No FB bound\n");
7620 		return 0;
7621 	}
7622 
7623 	afb = to_amdgpu_framebuffer(new_state->fb);
7624 	obj = new_state->fb->obj[0];
7625 	rbo = gem_to_amdgpu_bo(obj);
7626 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7627 
7628 	r = amdgpu_bo_reserve(rbo, true);
7629 	if (r) {
7630 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7631 		return r;
7632 	}
7633 
7634 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7635 	if (r) {
7636 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7637 		goto error_unlock;
7638 	}
7639 
7640 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7641 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7642 	else
7643 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7644 
7645 	r = amdgpu_bo_pin(rbo, domain);
7646 	if (unlikely(r != 0)) {
7647 		if (r != -ERESTARTSYS)
7648 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7649 		goto error_unlock;
7650 	}
7651 
7652 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7653 	if (unlikely(r != 0)) {
7654 		DRM_ERROR("%p bind failed\n", rbo);
7655 		goto error_unpin;
7656 	}
7657 
7658 	amdgpu_bo_unreserve(rbo);
7659 
7660 	afb->address = amdgpu_bo_gpu_offset(rbo);
7661 
7662 	amdgpu_bo_ref(rbo);
7663 
7664 	/*
7665 	 * We don't do surface updates on planes that have been newly created,
7666 	 * but we also don't have the afb->address during atomic check.
7667 	 *
7668 	 * Fill in buffer attributes depending on the address here, but only on
7669 	 * newly created planes since they're not being used by DC yet and this
7670 	 * won't modify global state.
7671 	 */
7672 	dm_plane_state_old = to_dm_plane_state(plane->state);
7673 	dm_plane_state_new = to_dm_plane_state(new_state);
7674 
7675 	if (dm_plane_state_new->dc_state &&
7676 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7677 		struct dc_plane_state *plane_state =
7678 			dm_plane_state_new->dc_state;
7679 		bool force_disable_dcc = !plane_state->dcc.enable;
7680 
7681 		fill_plane_buffer_attributes(
7682 			adev, afb, plane_state->format, plane_state->rotation,
7683 			afb->tiling_flags,
7684 			&plane_state->tiling_info, &plane_state->plane_size,
7685 			&plane_state->dcc, &plane_state->address,
7686 			afb->tmz_surface, force_disable_dcc);
7687 	}
7688 
7689 	return 0;
7690 
7691 error_unpin:
7692 	amdgpu_bo_unpin(rbo);
7693 
7694 error_unlock:
7695 	amdgpu_bo_unreserve(rbo);
7696 	return r;
7697 }
7698 
7699 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7700 				       struct drm_plane_state *old_state)
7701 {
7702 	struct amdgpu_bo *rbo;
7703 	int r;
7704 
7705 	if (!old_state->fb)
7706 		return;
7707 
7708 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7709 	r = amdgpu_bo_reserve(rbo, false);
7710 	if (unlikely(r)) {
7711 		DRM_ERROR("failed to reserve rbo before unpin\n");
7712 		return;
7713 	}
7714 
7715 	amdgpu_bo_unpin(rbo);
7716 	amdgpu_bo_unreserve(rbo);
7717 	amdgpu_bo_unref(&rbo);
7718 }
7719 
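/*
 * Validate plane position/scaling against the CRTC mode and the DC plane
 * caps.  DC expresses scaling limits as dst/src ratios scaled by 1000,
 * while DRM wants src/dst ratios in 16.16 fixed point, hence the
 * (1000 << 16) / factor conversions below.  As an illustrative example
 * (values assumed, not taken from any specific ASIC): max_upscale = 4000
 * (4x) yields min_scale = (1000 << 16) / 4000 = 16384 = 0.25 in 16.16,
 * i.e. the source may be at most a quarter of the destination.
 */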
7720 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7721 				       struct drm_crtc_state *new_crtc_state)
7722 {
7723 	struct drm_framebuffer *fb = state->fb;
7724 	int min_downscale, max_upscale;
7725 	int min_scale = 0;
7726 	int max_scale = INT_MAX;
7727 
7728 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7729 	if (fb && state->crtc) {
7730 		/* Validate viewport to cover the case when only the position changes */
7731 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7732 			int viewport_width = state->crtc_w;
7733 			int viewport_height = state->crtc_h;
7734 
7735 			if (state->crtc_x < 0)
7736 				viewport_width += state->crtc_x;
7737 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7738 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7739 
7740 			if (state->crtc_y < 0)
7741 				viewport_height += state->crtc_y;
7742 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7743 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7744 
7745 			if (viewport_width < 0 || viewport_height < 0) {
7746 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7747 				return -EINVAL;
7748 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7749 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7750 				return -EINVAL;
7751 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7752 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7753 				return -EINVAL;
7754 			}
7755 
7756 		}
7757 
7758 		/* Get min/max allowed scaling factors from plane caps. */
7759 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7760 					     &min_downscale, &max_upscale);
7761 		/*
7762 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7763 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7764 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7765 		 */
7766 		min_scale = (1000 << 16) / max_upscale;
7767 		max_scale = (1000 << 16) / min_downscale;
7768 	}
7769 
7770 	return drm_atomic_helper_check_plane_state(
7771 		state, new_crtc_state, min_scale, max_scale, true, true);
7772 }
7773 
7774 static int dm_plane_atomic_check(struct drm_plane *plane,
7775 				 struct drm_atomic_state *state)
7776 {
7777 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7778 										 plane);
7779 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7780 	struct dc *dc = adev->dm.dc;
7781 	struct dm_plane_state *dm_plane_state;
7782 	struct dc_scaling_info scaling_info;
7783 	struct drm_crtc_state *new_crtc_state;
7784 	int ret;
7785 
7786 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7787 
7788 	dm_plane_state = to_dm_plane_state(new_plane_state);
7789 
7790 	if (!dm_plane_state->dc_state)
7791 		return 0;
7792 
7793 	new_crtc_state =
7794 		drm_atomic_get_new_crtc_state(state,
7795 					      new_plane_state->crtc);
7796 	if (!new_crtc_state)
7797 		return -EINVAL;
7798 
7799 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7800 	if (ret)
7801 		return ret;
7802 
7803 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7804 	if (ret)
7805 		return ret;
7806 
7807 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7808 		return 0;
7809 
7810 	return -EINVAL;
7811 }
7812 
7813 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7814 				       struct drm_atomic_state *state)
7815 {
7816 	/* Only support async updates on cursor planes. */
7817 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7818 		return -EINVAL;
7819 
7820 	return 0;
7821 }
7822 
7823 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7824 					 struct drm_atomic_state *state)
7825 {
7826 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7827 									   plane);
7828 	struct drm_plane_state *old_state =
7829 		drm_atomic_get_old_plane_state(state, plane);
7830 
7831 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7832 
7833 	swap(plane->state->fb, new_state->fb);
7834 
7835 	plane->state->src_x = new_state->src_x;
7836 	plane->state->src_y = new_state->src_y;
7837 	plane->state->src_w = new_state->src_w;
7838 	plane->state->src_h = new_state->src_h;
7839 	plane->state->crtc_x = new_state->crtc_x;
7840 	plane->state->crtc_y = new_state->crtc_y;
7841 	plane->state->crtc_w = new_state->crtc_w;
7842 	plane->state->crtc_h = new_state->crtc_h;
7843 
7844 	handle_cursor_update(plane, old_state);
7845 }
7846 
7847 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7848 	.prepare_fb = dm_plane_helper_prepare_fb,
7849 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7850 	.atomic_check = dm_plane_atomic_check,
7851 	.atomic_async_check = dm_plane_atomic_async_check,
7852 	.atomic_async_update = dm_plane_atomic_async_update
7853 };
7854 
7855 /*
7856  * TODO: these are currently initialized to rgb formats only.
7857  * For future use cases we should either initialize them dynamically based on
7858  * plane capabilities, or initialize this array to all formats, so the internal
7859  * drm check will succeed, and let DC implement the proper check.
7860  */
7861 static const uint32_t rgb_formats[] = {
7862 	DRM_FORMAT_XRGB8888,
7863 	DRM_FORMAT_ARGB8888,
7864 	DRM_FORMAT_RGBA8888,
7865 	DRM_FORMAT_XRGB2101010,
7866 	DRM_FORMAT_XBGR2101010,
7867 	DRM_FORMAT_ARGB2101010,
7868 	DRM_FORMAT_ABGR2101010,
7869 	DRM_FORMAT_XRGB16161616,
7870 	DRM_FORMAT_XBGR16161616,
7871 	DRM_FORMAT_ARGB16161616,
7872 	DRM_FORMAT_ABGR16161616,
7873 	DRM_FORMAT_XBGR8888,
7874 	DRM_FORMAT_ABGR8888,
7875 	DRM_FORMAT_RGB565,
7876 };
7877 
7878 static const uint32_t overlay_formats[] = {
7879 	DRM_FORMAT_XRGB8888,
7880 	DRM_FORMAT_ARGB8888,
7881 	DRM_FORMAT_RGBA8888,
7882 	DRM_FORMAT_XBGR8888,
7883 	DRM_FORMAT_ABGR8888,
7884 	DRM_FORMAT_RGB565
7885 };
7886 
7887 static const u32 cursor_formats[] = {
7888 	DRM_FORMAT_ARGB8888
7889 };
7890 
7891 static int get_plane_formats(const struct drm_plane *plane,
7892 			     const struct dc_plane_cap *plane_cap,
7893 			     uint32_t *formats, int max_formats)
7894 {
7895 	int i, num_formats = 0;
7896 
7897 	/*
7898 	 * TODO: Query support for each group of formats directly from
7899 	 * DC plane caps. This will require adding more formats to the
7900 	 * caps list.
7901 	 */
7902 
7903 	switch (plane->type) {
7904 	case DRM_PLANE_TYPE_PRIMARY:
7905 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7906 			if (num_formats >= max_formats)
7907 				break;
7908 
7909 			formats[num_formats++] = rgb_formats[i];
7910 		}
7911 
7912 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7913 			formats[num_formats++] = DRM_FORMAT_NV12;
7914 		if (plane_cap && plane_cap->pixel_format_support.p010)
7915 			formats[num_formats++] = DRM_FORMAT_P010;
7916 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7917 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7918 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7919 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7920 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7921 		}
7922 		break;
7923 
7924 	case DRM_PLANE_TYPE_OVERLAY:
7925 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7926 			if (num_formats >= max_formats)
7927 				break;
7928 
7929 			formats[num_formats++] = overlay_formats[i];
7930 		}
7931 		break;
7932 
7933 	case DRM_PLANE_TYPE_CURSOR:
7934 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7935 			if (num_formats >= max_formats)
7936 				break;
7937 
7938 			formats[num_formats++] = cursor_formats[i];
7939 		}
7940 		break;
7941 	}
7942 
7943 	return num_formats;
7944 }
7945 
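/*
 * Register a DRM plane for DM.  The supported formats come from the DC
 * plane caps, the format modifiers are derived from the ASIC and plane
 * type, and the optional properties are created to match the hardware:
 * alpha and blend-mode properties for overlays with per-pixel alpha,
 * colour encoding/range properties for YUV-capable primaries, and a
 * rotation property on CHIP_BONAIRE and newer (except cursor planes).
 */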
7946 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7947 				struct drm_plane *plane,
7948 				unsigned long possible_crtcs,
7949 				const struct dc_plane_cap *plane_cap)
7950 {
7951 	uint32_t formats[32];
7952 	int num_formats;
7953 	int res = -EPERM;
7954 	unsigned int supported_rotations;
7955 	uint64_t *modifiers = NULL;
7956 
7957 	num_formats = get_plane_formats(plane, plane_cap, formats,
7958 					ARRAY_SIZE(formats));
7959 
7960 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7961 	if (res)
7962 		return res;
7963 
7964 	if (modifiers == NULL)
7965 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7966 
7967 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7968 				       &dm_plane_funcs, formats, num_formats,
7969 				       modifiers, plane->type, NULL);
7970 	kfree(modifiers);
7971 	if (res)
7972 		return res;
7973 
7974 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7975 	    plane_cap && plane_cap->per_pixel_alpha) {
7976 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7977 					  BIT(DRM_MODE_BLEND_PREMULTI) |
7978 					  BIT(DRM_MODE_BLEND_COVERAGE);
7979 
7980 		drm_plane_create_alpha_property(plane);
7981 		drm_plane_create_blend_mode_property(plane, blend_caps);
7982 	}
7983 
7984 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7985 	    plane_cap &&
7986 	    (plane_cap->pixel_format_support.nv12 ||
7987 	     plane_cap->pixel_format_support.p010)) {
7988 		/* This only affects YUV formats. */
7989 		drm_plane_create_color_properties(
7990 			plane,
7991 			BIT(DRM_COLOR_YCBCR_BT601) |
7992 			BIT(DRM_COLOR_YCBCR_BT709) |
7993 			BIT(DRM_COLOR_YCBCR_BT2020),
7994 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7995 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7996 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7997 	}
7998 
7999 	supported_rotations =
8000 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8001 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8002 
8003 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
8004 	    plane->type != DRM_PLANE_TYPE_CURSOR)
8005 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8006 						   supported_rotations);
8007 
8008 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8009 
8010 	/* Create (reset) the plane state */
8011 	if (plane->funcs->reset)
8012 		plane->funcs->reset(plane);
8013 
8014 	return 0;
8015 }
8016 
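/*
 * Create a CRTC together with its dedicated cursor plane.  The maximum
 * cursor dimensions are taken from the DC caps, the OTG instance starts
 * out unassigned (-1), and colour management (degamma/CTM/gamma LUTs)
 * plus a legacy gamma ramp are registered on the CRTC.
 */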
8017 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8018 			       struct drm_plane *plane,
8019 			       uint32_t crtc_index)
8020 {
8021 	struct amdgpu_crtc *acrtc = NULL;
8022 	struct drm_plane *cursor_plane;
8023 
8024 	int res = -ENOMEM;
8025 
8026 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8027 	if (!cursor_plane)
8028 		goto fail;
8029 
8030 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8031 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
8032 
8033 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8034 	if (!acrtc)
8035 		goto fail;
8036 
8037 	res = drm_crtc_init_with_planes(
8038 			dm->ddev,
8039 			&acrtc->base,
8040 			plane,
8041 			cursor_plane,
8042 			&amdgpu_dm_crtc_funcs, NULL);
8043 
8044 	if (res)
8045 		goto fail;
8046 
8047 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8048 
8049 	/* Create (reset) the plane state */
8050 	if (acrtc->base.funcs->reset)
8051 		acrtc->base.funcs->reset(&acrtc->base);
8052 
8053 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8054 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8055 
8056 	acrtc->crtc_id = crtc_index;
8057 	acrtc->base.enabled = false;
8058 	acrtc->otg_inst = -1;
8059 
8060 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8061 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8062 				   true, MAX_COLOR_LUT_ENTRIES);
8063 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8064 
8065 	return 0;
8066 
8067 fail:
8068 	kfree(acrtc);
8069 	kfree(cursor_plane);
8070 	return res;
8071 }
8072 
8073 
8074 static int to_drm_connector_type(enum signal_type st)
8075 {
8076 	switch (st) {
8077 	case SIGNAL_TYPE_HDMI_TYPE_A:
8078 		return DRM_MODE_CONNECTOR_HDMIA;
8079 	case SIGNAL_TYPE_EDP:
8080 		return DRM_MODE_CONNECTOR_eDP;
8081 	case SIGNAL_TYPE_LVDS:
8082 		return DRM_MODE_CONNECTOR_LVDS;
8083 	case SIGNAL_TYPE_RGB:
8084 		return DRM_MODE_CONNECTOR_VGA;
8085 	case SIGNAL_TYPE_DISPLAY_PORT:
8086 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8087 		return DRM_MODE_CONNECTOR_DisplayPort;
8088 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8089 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8090 		return DRM_MODE_CONNECTOR_DVID;
8091 	case SIGNAL_TYPE_VIRTUAL:
8092 		return DRM_MODE_CONNECTOR_VIRTUAL;
8093 
8094 	default:
8095 		return DRM_MODE_CONNECTOR_Unknown;
8096 	}
8097 }
8098 
8099 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8100 {
8101 	struct drm_encoder *encoder;
8102 
8103 	/* There is only one encoder per connector */
8104 	drm_connector_for_each_possible_encoder(connector, encoder)
8105 		return encoder;
8106 
8107 	return NULL;
8108 }
8109 
8110 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8111 {
8112 	struct drm_encoder *encoder;
8113 	struct amdgpu_encoder *amdgpu_encoder;
8114 
8115 	encoder = amdgpu_dm_connector_to_encoder(connector);
8116 
8117 	if (encoder == NULL)
8118 		return;
8119 
8120 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8121 
8122 	amdgpu_encoder->native_mode.clock = 0;
8123 
8124 	if (!list_empty(&connector->probed_modes)) {
8125 		struct drm_display_mode *preferred_mode = NULL;
8126 
8127 		list_for_each_entry(preferred_mode,
8128 				    &connector->probed_modes,
8129 				    head) {
8130 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8131 				amdgpu_encoder->native_mode = *preferred_mode;
8132 
8133 			break;
8134 		}
8135 
8136 	}
8137 }
8138 
8139 static struct drm_display_mode *
8140 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8141 			     char *name,
8142 			     int hdisplay, int vdisplay)
8143 {
8144 	struct drm_device *dev = encoder->dev;
8145 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8146 	struct drm_display_mode *mode = NULL;
8147 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8148 
8149 	mode = drm_mode_duplicate(dev, native_mode);
8150 
8151 	if (mode == NULL)
8152 		return NULL;
8153 
8154 	mode->hdisplay = hdisplay;
8155 	mode->vdisplay = vdisplay;
8156 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8157 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8158 
8159 	return mode;
8160 
8161 }
8162 
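/*
 * Add a set of common, smaller-than-native modes derived from the
 * encoder's native mode.  Panels often only report their native timing
 * in the EDID; cloning the native mode and patching hdisplay/vdisplay
 * gives userspace the usual fallback resolutions.  Modes that exceed the
 * native size or already exist in the probed list are skipped.
 */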
8163 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8164 						 struct drm_connector *connector)
8165 {
8166 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8167 	struct drm_display_mode *mode = NULL;
8168 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8169 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8170 				to_amdgpu_dm_connector(connector);
8171 	int i;
8172 	int n;
8173 	struct mode_size {
8174 		char name[DRM_DISPLAY_MODE_LEN];
8175 		int w;
8176 		int h;
8177 	} common_modes[] = {
8178 		{  "640x480",  640,  480},
8179 		{  "800x600",  800,  600},
8180 		{ "1024x768", 1024,  768},
8181 		{ "1280x720", 1280,  720},
8182 		{ "1280x800", 1280,  800},
8183 		{"1280x1024", 1280, 1024},
8184 		{ "1440x900", 1440,  900},
8185 		{"1680x1050", 1680, 1050},
8186 		{"1600x1200", 1600, 1200},
8187 		{"1920x1080", 1920, 1080},
8188 		{"1920x1200", 1920, 1200}
8189 	};
8190 
8191 	n = ARRAY_SIZE(common_modes);
8192 
8193 	for (i = 0; i < n; i++) {
8194 		struct drm_display_mode *curmode = NULL;
8195 		bool mode_existed = false;
8196 
8197 		if (common_modes[i].w > native_mode->hdisplay ||
8198 		    common_modes[i].h > native_mode->vdisplay ||
8199 		   (common_modes[i].w == native_mode->hdisplay &&
8200 		    common_modes[i].h == native_mode->vdisplay))
8201 			continue;
8202 
8203 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8204 			if (common_modes[i].w == curmode->hdisplay &&
8205 			    common_modes[i].h == curmode->vdisplay) {
8206 				mode_existed = true;
8207 				break;
8208 			}
8209 		}
8210 
8211 		if (mode_existed)
8212 			continue;
8213 
8214 		mode = amdgpu_dm_create_common_mode(encoder,
8215 				common_modes[i].name, common_modes[i].w,
8216 				common_modes[i].h);
8217 		if (!mode)
8218 			continue;
8219 
8220 		drm_mode_probed_add(connector, mode);
8221 		amdgpu_dm_connector->num_modes++;
8222 	}
8223 }
8224 
8225 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8226 {
8227 	struct drm_encoder *encoder;
8228 	struct amdgpu_encoder *amdgpu_encoder;
8229 	const struct drm_display_mode *native_mode;
8230 
8231 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8232 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8233 		return;
8234 
8235 	encoder = amdgpu_dm_connector_to_encoder(connector);
8236 	if (!encoder)
8237 		return;
8238 
8239 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8240 
8241 	native_mode = &amdgpu_encoder->native_mode;
8242 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8243 		return;
8244 
8245 	drm_connector_set_panel_orientation_with_quirk(connector,
8246 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8247 						       native_mode->hdisplay,
8248 						       native_mode->vdisplay);
8249 }
8250 
8251 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8252 					      struct edid *edid)
8253 {
8254 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8255 			to_amdgpu_dm_connector(connector);
8256 
8257 	if (edid) {
8258 		/* empty probed_modes */
8259 		INIT_LIST_HEAD(&connector->probed_modes);
8260 		amdgpu_dm_connector->num_modes =
8261 				drm_add_edid_modes(connector, edid);
8262 
8263 		/* Sort the probed modes before calling
8264 		 * amdgpu_dm_get_native_mode(), since the EDID can have
8265 		 * more than one preferred mode. Modes later in the
8266 		 * probed mode list could be of a higher, preferred
8267 		 * resolution. For example, 3840x2160 in the base EDID
8268 		 * preferred timing and 4096x2160 as the preferred
8269 		 * resolution in a DID extension block later.
8270 		 */
8271 		drm_mode_sort(&connector->probed_modes);
8272 		amdgpu_dm_get_native_mode(connector);
8273 
8274 		/* Freesync capabilities are reset by calling
8275 		 * drm_add_edid_modes() and need to be
8276 		 * restored here.
8277 		 */
8278 		amdgpu_dm_update_freesync_caps(connector, edid);
8279 
8280 		amdgpu_set_panel_orientation(connector);
8281 	} else {
8282 		amdgpu_dm_connector->num_modes = 0;
8283 	}
8284 }
8285 
8286 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8287 			      struct drm_display_mode *mode)
8288 {
8289 	struct drm_display_mode *m;
8290 
8291 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8292 		if (drm_mode_equal(m, mode))
8293 			return true;
8294 	}
8295 
8296 	return false;
8297 }
8298 
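/*
 * Synthesize extra fixed-refresh modes for freesync panels.  Starting
 * from the highest-refresh probed mode, a new mode is created for each
 * common video rate that lies within the panel's freesync range by
 * stretching the vertical blanking:
 *   target_vtotal = (clock * 1000 * 1000) / (rate_in_mHz * htotal)
 * with vsync_start/vsync_end shifted by the same amount, so fixed-rate
 * content (e.g. 24 or 30 fps video) can be shown at a matching refresh
 * rate.  Modes that would be illegal or duplicates are skipped.
 */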
8299 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8300 {
8301 	const struct drm_display_mode *m;
8302 	struct drm_display_mode *new_mode;
8303 	uint i;
8304 	uint32_t new_modes_count = 0;
8305 
8306 	/* Standard FPS values
8307 	 *
8308 	 * 23.976       - TV/NTSC
8309 	 * 24 	        - Cinema
8310 	 * 25 	        - TV/PAL
8311 	 * 29.97        - TV/NTSC
8312 	 * 30 	        - TV/NTSC
8313 	 * 48 	        - Cinema HFR
8314 	 * 50 	        - TV/PAL
8315 	 * 60 	        - Commonly used
8316 	 * 48,72,96,120 - Multiples of 24
8317 	 */
8318 	static const uint32_t common_rates[] = {
8319 		23976, 24000, 25000, 29970, 30000,
8320 		48000, 50000, 60000, 72000, 96000, 120000
8321 	};
8322 
8323 	/*
8324 	 * Find mode with highest refresh rate with the same resolution
8325 	 * as the preferred mode. Some monitors report a preferred mode
8326 	 * with lower resolution than the highest refresh rate supported.
8327 	 */
8328 
8329 	m = get_highest_refresh_rate_mode(aconnector, true);
8330 	if (!m)
8331 		return 0;
8332 
8333 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8334 		uint64_t target_vtotal, target_vtotal_diff;
8335 		uint64_t num, den;
8336 
8337 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8338 			continue;
8339 
8340 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8341 		    common_rates[i] > aconnector->max_vfreq * 1000)
8342 			continue;
8343 
8344 		num = (unsigned long long)m->clock * 1000 * 1000;
8345 		den = common_rates[i] * (unsigned long long)m->htotal;
8346 		target_vtotal = div_u64(num, den);
8347 		target_vtotal_diff = target_vtotal - m->vtotal;
8348 
8349 		/* Check for illegal modes */
8350 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8351 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8352 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8353 			continue;
8354 
8355 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8356 		if (!new_mode)
8357 			goto out;
8358 
8359 		new_mode->vtotal += (u16)target_vtotal_diff;
8360 		new_mode->vsync_start += (u16)target_vtotal_diff;
8361 		new_mode->vsync_end += (u16)target_vtotal_diff;
8362 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8363 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8364 
8365 		if (!is_duplicate_mode(aconnector, new_mode)) {
8366 			drm_mode_probed_add(&aconnector->base, new_mode);
8367 			new_modes_count += 1;
8368 		} else {
8369 			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
8370 	}
8371  out:
8372 	return new_modes_count;
8373 }
8374 
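/* Add FreeSync video modes when the connector's VRR range spans more than 10 Hz. */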
8375 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8376 						   struct edid *edid)
8377 {
8378 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8379 		to_amdgpu_dm_connector(connector);
8380 
8381 	if (!edid)
8382 		return;
8383 
8384 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8385 		amdgpu_dm_connector->num_modes +=
8386 			add_fs_modes(amdgpu_dm_connector);
8387 }
8388 
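/*
 * .get_modes connector helper: populate the probed mode list from the cached
 * EDID and append common and FreeSync modes, or fall back to 640x480 modes
 * when no valid EDID is available.
 */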
8389 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8390 {
8391 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8392 			to_amdgpu_dm_connector(connector);
8393 	struct drm_encoder *encoder;
8394 	struct edid *edid = amdgpu_dm_connector->edid;
8395 
8396 	encoder = amdgpu_dm_connector_to_encoder(connector);
8397 
8398 	if (!drm_edid_is_valid(edid)) {
8399 		amdgpu_dm_connector->num_modes =
8400 				drm_add_modes_noedid(connector, 640, 480);
8401 	} else {
8402 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8403 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8404 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8405 	}
8406 	amdgpu_dm_fbc_init(connector);
8407 
8408 	return amdgpu_dm_connector->num_modes;
8409 }
8410 
8411 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8412 				     struct amdgpu_dm_connector *aconnector,
8413 				     int connector_type,
8414 				     struct dc_link *link,
8415 				     int link_index)
8416 {
8417 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8418 
8419 	/*
8420 	 * Some of the properties below require access to state, like bpc.
8421 	 * Allocate some default initial connector state with our reset helper.
8422 	 */
8423 	if (aconnector->base.funcs->reset)
8424 		aconnector->base.funcs->reset(&aconnector->base);
8425 
8426 	aconnector->connector_id = link_index;
8427 	aconnector->dc_link = link;
8428 	aconnector->base.interlace_allowed = false;
8429 	aconnector->base.doublescan_allowed = false;
8430 	aconnector->base.stereo_allowed = false;
8431 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8432 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8433 	aconnector->audio_inst = -1;
8434 	mutex_init(&aconnector->hpd_lock);
8435 
8436 	/*
8437 	 * Configure HPD (hot plug detect) support. The default value of
8438 	 * connector->polled is 0, which means hot plug is not supported.
8439 	 */
8440 	switch (connector_type) {
8441 	case DRM_MODE_CONNECTOR_HDMIA:
8442 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8443 		aconnector->base.ycbcr_420_allowed =
8444 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8445 		break;
8446 	case DRM_MODE_CONNECTOR_DisplayPort:
8447 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8448 		link->link_enc = link_enc_cfg_get_link_enc(link);
8449 		ASSERT(link->link_enc);
8450 		if (link->link_enc)
8451 			aconnector->base.ycbcr_420_allowed =
8452 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8453 		break;
8454 	case DRM_MODE_CONNECTOR_DVID:
8455 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8456 		break;
8457 	default:
8458 		break;
8459 	}
8460 
8461 	drm_object_attach_property(&aconnector->base.base,
8462 				dm->ddev->mode_config.scaling_mode_property,
8463 				DRM_MODE_SCALE_NONE);
8464 
8465 	drm_object_attach_property(&aconnector->base.base,
8466 				adev->mode_info.underscan_property,
8467 				UNDERSCAN_OFF);
8468 	drm_object_attach_property(&aconnector->base.base,
8469 				adev->mode_info.underscan_hborder_property,
8470 				0);
8471 	drm_object_attach_property(&aconnector->base.base,
8472 				adev->mode_info.underscan_vborder_property,
8473 				0);
8474 
8475 	if (!aconnector->mst_port)
8476 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8477 
8478 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8479 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8480 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8481 
8482 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8483 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8484 		drm_object_attach_property(&aconnector->base.base,
8485 				adev->mode_info.abm_level_property, 0);
8486 	}
8487 
8488 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8489 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8490 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8491 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8492 
8493 		if (!aconnector->mst_port)
8494 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8495 
8496 #ifdef CONFIG_DRM_AMD_DC_HDCP
8497 		if (adev->dm.hdcp_workqueue)
8498 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8499 #endif
8500 	}
8501 }
8502 
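/*
 * I2C transfer callback: translate the i2c_msg array into a DC i2c_command
 * and submit it over the link's DDC channel.
 */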
8503 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8504 			      struct i2c_msg *msgs, int num)
8505 {
8506 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8507 	struct ddc_service *ddc_service = i2c->ddc_service;
8508 	struct i2c_command cmd;
8509 	int i;
8510 	int result = -EIO;
8511 
8512 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8513 
8514 	if (!cmd.payloads)
8515 		return result;
8516 
8517 	cmd.number_of_payloads = num;
8518 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8519 	cmd.speed = 100;
8520 
8521 	for (i = 0; i < num; i++) {
8522 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8523 		cmd.payloads[i].address = msgs[i].addr;
8524 		cmd.payloads[i].length = msgs[i].len;
8525 		cmd.payloads[i].data = msgs[i].buf;
8526 	}
8527 
8528 	if (dc_submit_i2c(
8529 			ddc_service->ctx->dc,
8530 			ddc_service->ddc_pin->hw_info.ddc_channel,
8531 			&cmd))
8532 		result = num;
8533 
8534 	kfree(cmd.payloads);
8535 	return result;
8536 }
8537 
8538 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8539 {
8540 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8541 }
8542 
8543 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8544 	.master_xfer = amdgpu_dm_i2c_xfer,
8545 	.functionality = amdgpu_dm_i2c_func,
8546 };
8547 
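/* Allocate and initialize an i2c adapter wrapping the link's DDC service. */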
8548 static struct amdgpu_i2c_adapter *
8549 create_i2c(struct ddc_service *ddc_service,
8550 	   int link_index,
8551 	   int *res)
8552 {
8553 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8554 	struct amdgpu_i2c_adapter *i2c;
8555 
8556 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8557 	if (!i2c)
8558 		return NULL;
8559 	i2c->base.owner = THIS_MODULE;
8560 	i2c->base.class = I2C_CLASS_DDC;
8561 	i2c->base.dev.parent = &adev->pdev->dev;
8562 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8563 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8564 	i2c_set_adapdata(&i2c->base, i2c);
8565 	i2c->ddc_service = ddc_service;
8566 	if (i2c->ddc_service->ddc_pin)
8567 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8568 
8569 	return i2c;
8570 }
8571 
8572 
8573 /*
8574  * Note: this function assumes that dc_link_detect() was called for the
8575  * dc_link which will be represented by this aconnector.
8576  */
8577 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8578 				    struct amdgpu_dm_connector *aconnector,
8579 				    uint32_t link_index,
8580 				    struct amdgpu_encoder *aencoder)
8581 {
8582 	int res = 0;
8583 	int connector_type;
8584 	struct dc *dc = dm->dc;
8585 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8586 	struct amdgpu_i2c_adapter *i2c;
8587 
8588 	link->priv = aconnector;
8589 
8590 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8591 
8592 	i2c = create_i2c(link->ddc, link->link_index, &res);
8593 	if (!i2c) {
8594 		DRM_ERROR("Failed to create i2c adapter data\n");
8595 		return -ENOMEM;
8596 	}
8597 
8598 	aconnector->i2c = i2c;
8599 	res = i2c_add_adapter(&i2c->base);
8600 
8601 	if (res) {
8602 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8603 		goto out_free;
8604 	}
8605 
8606 	connector_type = to_drm_connector_type(link->connector_signal);
8607 
8608 	res = drm_connector_init_with_ddc(
8609 			dm->ddev,
8610 			&aconnector->base,
8611 			&amdgpu_dm_connector_funcs,
8612 			connector_type,
8613 			&i2c->base);
8614 
8615 	if (res) {
8616 		DRM_ERROR("connector_init failed\n");
8617 		aconnector->connector_id = -1;
8618 		goto out_free;
8619 	}
8620 
8621 	drm_connector_helper_add(
8622 			&aconnector->base,
8623 			&amdgpu_dm_connector_helper_funcs);
8624 
8625 	amdgpu_dm_connector_init_helper(
8626 		dm,
8627 		aconnector,
8628 		connector_type,
8629 		link,
8630 		link_index);
8631 
8632 	drm_connector_attach_encoder(
8633 		&aconnector->base, &aencoder->base);
8634 
8635 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8636 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8637 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8638 
8639 out_free:
8640 	if (res) {
8641 		kfree(i2c);
8642 		aconnector->i2c = NULL;
8643 	}
8644 	return res;
8645 }
8646 
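/* Build the encoder's possible_crtcs bitmask from the number of CRTCs. */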
8647 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8648 {
8649 	switch (adev->mode_info.num_crtc) {
8650 	case 1:
8651 		return 0x1;
8652 	case 2:
8653 		return 0x3;
8654 	case 3:
8655 		return 0x7;
8656 	case 4:
8657 		return 0xf;
8658 	case 5:
8659 		return 0x1f;
8660 	case 6:
8661 	default:
8662 		return 0x3f;
8663 	}
8664 }
8665 
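/* Initialize the DRM encoder for a link and attach the DM encoder helpers. */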
8666 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8667 				  struct amdgpu_encoder *aencoder,
8668 				  uint32_t link_index)
8669 {
8670 	struct amdgpu_device *adev = drm_to_adev(dev);
8671 
8672 	int res = drm_encoder_init(dev,
8673 				   &aencoder->base,
8674 				   &amdgpu_dm_encoder_funcs,
8675 				   DRM_MODE_ENCODER_TMDS,
8676 				   NULL);
8677 
8678 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8679 
8680 	if (!res)
8681 		aencoder->encoder_id = link_index;
8682 	else
8683 		aencoder->encoder_id = -1;
8684 
8685 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8686 
8687 	return res;
8688 }
8689 
8690 static void manage_dm_interrupts(struct amdgpu_device *adev,
8691 				 struct amdgpu_crtc *acrtc,
8692 				 bool enable)
8693 {
8694 	/*
8695 	 * We have no guarantee that the frontend index maps to the same
8696 	 * backend index - some even map to more than one.
8697 	 *
8698 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8699 	 */
8700 	int irq_type =
8701 		amdgpu_display_crtc_idx_to_irq_type(
8702 			adev,
8703 			acrtc->crtc_id);
8704 
8705 	if (enable) {
8706 		drm_crtc_vblank_on(&acrtc->base);
8707 		amdgpu_irq_get(
8708 			adev,
8709 			&adev->pageflip_irq,
8710 			irq_type);
8711 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8712 		amdgpu_irq_get(
8713 			adev,
8714 			&adev->vline0_irq,
8715 			irq_type);
8716 #endif
8717 	} else {
8718 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8719 		amdgpu_irq_put(
8720 			adev,
8721 			&adev->vline0_irq,
8722 			irq_type);
8723 #endif
8724 		amdgpu_irq_put(
8725 			adev,
8726 			&adev->pageflip_irq,
8727 			irq_type);
8728 		drm_crtc_vblank_off(&acrtc->base);
8729 	}
8730 }
8731 
8732 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8733 				      struct amdgpu_crtc *acrtc)
8734 {
8735 	int irq_type =
8736 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8737 
8738 	/*
8739 	 * This reads the current state for the IRQ and forces a reapply
8740 	 * of the setting to hardware.
8741 	 */
8742 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8743 }
8744 
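/* Return true if scaling or underscan settings differ between the two connector states. */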
8745 static bool
8746 is_scaling_state_different(const struct dm_connector_state *dm_state,
8747 			   const struct dm_connector_state *old_dm_state)
8748 {
8749 	if (dm_state->scaling != old_dm_state->scaling)
8750 		return true;
8751 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8752 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8753 			return true;
8754 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8755 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8756 			return true;
8757 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8758 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8759 		return true;
8760 	return false;
8761 }
8762 
8763 #ifdef CONFIG_DRM_AMD_DC_HDCP
8764 static bool is_content_protection_different(struct drm_connector_state *state,
8765 					    const struct drm_connector_state *old_state,
8766 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8767 {
8768 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8769 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8770 
8771 	/* Handle: Type0/1 change */
8772 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8773 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8774 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8775 		return true;
8776 	}
8777 
8778 	/* CP is being re-enabled, ignore this
8779 	 *
8780 	 * Handles:	ENABLED -> DESIRED
8781 	 */
8782 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8783 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8784 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8785 		return false;
8786 	}
8787 
8788 	/* S3 resume case: old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8789 	 *
8790 	 * Handles:	UNDESIRED -> ENABLED
8791 	 */
8792 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8793 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8794 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8795 
8796 	/* Stream removed and re-enabled
8797 	 *
8798 	 * Can sometimes overlap with the HPD case,
8799 	 * thus set update_hdcp to false to avoid
8800 	 * setting HDCP multiple times.
8801 	 *
8802 	 * Handles:	DESIRED -> DESIRED (Special case)
8803 	 */
8804 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8805 		state->crtc && state->crtc->enabled &&
8806 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8807 		dm_con_state->update_hdcp = false;
8808 		return true;
8809 	}
8810 
8811 	/* Hot-plug, headless s3, dpms
8812 	 *
8813 	 * Only start HDCP if the display is connected/enabled.
8814 	 * update_hdcp flag will be set to false until the next
8815 	 * HPD comes in.
8816 	 *
8817 	 * Handles:	DESIRED -> DESIRED (Special case)
8818 	 */
8819 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8820 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8821 		dm_con_state->update_hdcp = false;
8822 		return true;
8823 	}
8824 
8825 	/*
8826 	 * Handles:	UNDESIRED -> UNDESIRED
8827 	 *		DESIRED -> DESIRED
8828 	 *		ENABLED -> ENABLED
8829 	 */
8830 	if (old_state->content_protection == state->content_protection)
8831 		return false;
8832 
8833 	/*
8834 	 * Handles:	UNDESIRED -> DESIRED
8835 	 *		DESIRED -> UNDESIRED
8836 	 *		ENABLED -> UNDESIRED
8837 	 */
8838 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8839 		return true;
8840 
8841 	/*
8842 	 * Handles:	DESIRED -> ENABLED
8843 	 */
8844 	return false;
8845 }
8846 
8847 #endif
8848 static void remove_stream(struct amdgpu_device *adev,
8849 			  struct amdgpu_crtc *acrtc,
8850 			  struct dc_stream_state *stream)
8851 {
8852 	/* this is the update mode case */
8853 
8854 	acrtc->otg_inst = -1;
8855 	acrtc->enabled = false;
8856 }
8857 
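/*
 * Convert the cursor plane state into a DC cursor position, folding negative
 * coordinates into the hotspot so the cursor can cross the top/left screen edge.
 */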
8858 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8859 			       struct dc_cursor_position *position)
8860 {
8861 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8862 	int x, y;
8863 	int xorigin = 0, yorigin = 0;
8864 
8865 	if (!crtc || !plane->state->fb)
8866 		return 0;
8867 
8868 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8869 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8870 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8871 			  __func__,
8872 			  plane->state->crtc_w,
8873 			  plane->state->crtc_h);
8874 		return -EINVAL;
8875 	}
8876 
8877 	x = plane->state->crtc_x;
8878 	y = plane->state->crtc_y;
8879 
8880 	if (x <= -amdgpu_crtc->max_cursor_width ||
8881 	    y <= -amdgpu_crtc->max_cursor_height)
8882 		return 0;
8883 
8884 	if (x < 0) {
8885 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8886 		x = 0;
8887 	}
8888 	if (y < 0) {
8889 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8890 		y = 0;
8891 	}
8892 	position->enable = true;
8893 	position->translate_by_source = true;
8894 	position->x = x;
8895 	position->y = y;
8896 	position->x_hotspot = xorigin;
8897 	position->y_hotspot = yorigin;
8898 
8899 	return 0;
8900 }
8901 
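/* Program the DC cursor attributes and position for a cursor plane update. */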
8902 static void handle_cursor_update(struct drm_plane *plane,
8903 				 struct drm_plane_state *old_plane_state)
8904 {
8905 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8906 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8907 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8908 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8909 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8910 	uint64_t address = afb ? afb->address : 0;
8911 	struct dc_cursor_position position = {0};
8912 	struct dc_cursor_attributes attributes;
8913 	int ret;
8914 
8915 	if (!plane->state->fb && !old_plane_state->fb)
8916 		return;
8917 
8918 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8919 		      __func__,
8920 		      amdgpu_crtc->crtc_id,
8921 		      plane->state->crtc_w,
8922 		      plane->state->crtc_h);
8923 
8924 	ret = get_cursor_position(plane, crtc, &position);
8925 	if (ret)
8926 		return;
8927 
8928 	if (!position.enable) {
8929 		/* turn off cursor */
8930 		if (crtc_state && crtc_state->stream) {
8931 			mutex_lock(&adev->dm.dc_lock);
8932 			dc_stream_set_cursor_position(crtc_state->stream,
8933 						      &position);
8934 			mutex_unlock(&adev->dm.dc_lock);
8935 		}
8936 		return;
8937 	}
8938 
8939 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8940 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8941 
8942 	memset(&attributes, 0, sizeof(attributes));
8943 	attributes.address.high_part = upper_32_bits(address);
8944 	attributes.address.low_part  = lower_32_bits(address);
8945 	attributes.width             = plane->state->crtc_w;
8946 	attributes.height            = plane->state->crtc_h;
8947 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8948 	attributes.rotation_angle    = 0;
8949 	attributes.attribute_flags.value = 0;
8950 
8951 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8952 
8953 	if (crtc_state->stream) {
8954 		mutex_lock(&adev->dm.dc_lock);
8955 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8956 							 &attributes))
8957 			DRM_ERROR("DC failed to set cursor attributes\n");
8958 
8959 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8960 						   &position))
8961 			DRM_ERROR("DC failed to set cursor position\n");
8962 		mutex_unlock(&adev->dm.dc_lock);
8963 	}
8964 }
8965 
8966 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8967 {
8968 
8969 	assert_spin_locked(&acrtc->base.dev->event_lock);
8970 	WARN_ON(acrtc->event);
8971 
8972 	acrtc->event = acrtc->base.state->event;
8973 
8974 	/* Set the flip status */
8975 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8976 
8977 	/* Mark this event as consumed */
8978 	acrtc->base.state->event = NULL;
8979 
8980 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8981 		     acrtc->crtc_id);
8982 }
8983 
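/*
 * Update the VRR parameters tracked in dm_irq_params and rebuild the VRR
 * infopacket for the stream around a plane flip, under the event_lock.
 */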
8984 static void update_freesync_state_on_stream(
8985 	struct amdgpu_display_manager *dm,
8986 	struct dm_crtc_state *new_crtc_state,
8987 	struct dc_stream_state *new_stream,
8988 	struct dc_plane_state *surface,
8989 	u32 flip_timestamp_in_us)
8990 {
8991 	struct mod_vrr_params vrr_params;
8992 	struct dc_info_packet vrr_infopacket = {0};
8993 	struct amdgpu_device *adev = dm->adev;
8994 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8995 	unsigned long flags;
8996 	bool pack_sdp_v1_3 = false;
8997 
8998 	if (!new_stream)
8999 		return;
9000 
9001 	/*
9002 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9003 	 * For now it's sufficient to just guard against these conditions.
9004 	 */
9005 
9006 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9007 		return;
9008 
9009 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9010 	vrr_params = acrtc->dm_irq_params.vrr_params;
9011 
9012 	if (surface) {
9013 		mod_freesync_handle_preflip(
9014 			dm->freesync_module,
9015 			surface,
9016 			new_stream,
9017 			flip_timestamp_in_us,
9018 			&vrr_params);
9019 
9020 		if (adev->family < AMDGPU_FAMILY_AI &&
9021 		    amdgpu_dm_vrr_active(new_crtc_state)) {
9022 			mod_freesync_handle_v_update(dm->freesync_module,
9023 						     new_stream, &vrr_params);
9024 
9025 			/* Need to call this before the frame ends. */
9026 			dc_stream_adjust_vmin_vmax(dm->dc,
9027 						   new_crtc_state->stream,
9028 						   &vrr_params.adjust);
9029 		}
9030 	}
9031 
9032 	mod_freesync_build_vrr_infopacket(
9033 		dm->freesync_module,
9034 		new_stream,
9035 		&vrr_params,
9036 		PACKET_TYPE_VRR,
9037 		TRANSFER_FUNC_UNKNOWN,
9038 		&vrr_infopacket,
9039 		pack_sdp_v1_3);
9040 
9041 	new_crtc_state->freesync_timing_changed |=
9042 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9043 			&vrr_params.adjust,
9044 			sizeof(vrr_params.adjust)) != 0);
9045 
9046 	new_crtc_state->freesync_vrr_info_changed |=
9047 		(memcmp(&new_crtc_state->vrr_infopacket,
9048 			&vrr_infopacket,
9049 			sizeof(vrr_infopacket)) != 0);
9050 
9051 	acrtc->dm_irq_params.vrr_params = vrr_params;
9052 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9053 
9054 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9055 	new_stream->vrr_infopacket = vrr_infopacket;
9056 
9057 	if (new_crtc_state->freesync_vrr_info_changed)
9058 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9059 			      new_crtc_state->base.crtc->base.id,
9060 			      (int)new_crtc_state->base.vrr_enabled,
9061 			      (int)vrr_params.state);
9062 
9063 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9064 }
9065 
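/*
 * Recompute the FreeSync/VRR parameters for a stream and copy them into
 * dm_irq_params so the interrupt handlers see a consistent snapshot.
 */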
9066 static void update_stream_irq_parameters(
9067 	struct amdgpu_display_manager *dm,
9068 	struct dm_crtc_state *new_crtc_state)
9069 {
9070 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9071 	struct mod_vrr_params vrr_params;
9072 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9073 	struct amdgpu_device *adev = dm->adev;
9074 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9075 	unsigned long flags;
9076 
9077 	if (!new_stream)
9078 		return;
9079 
9080 	/*
9081 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9082 	 * For now it's sufficient to just guard against these conditions.
9083 	 */
9084 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9085 		return;
9086 
9087 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9088 	vrr_params = acrtc->dm_irq_params.vrr_params;
9089 
9090 	if (new_crtc_state->vrr_supported &&
9091 	    config.min_refresh_in_uhz &&
9092 	    config.max_refresh_in_uhz) {
9093 		/*
9094 		 * if freesync compatible mode was set, config.state will be set
9095 		 * in atomic check
9096 		 */
9097 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9098 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9099 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9100 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9101 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9102 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9103 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9104 		} else {
9105 			config.state = new_crtc_state->base.vrr_enabled ?
9106 						     VRR_STATE_ACTIVE_VARIABLE :
9107 						     VRR_STATE_INACTIVE;
9108 		}
9109 	} else {
9110 		config.state = VRR_STATE_UNSUPPORTED;
9111 	}
9112 
9113 	mod_freesync_build_vrr_params(dm->freesync_module,
9114 				      new_stream,
9115 				      &config, &vrr_params);
9116 
9117 	new_crtc_state->freesync_timing_changed |=
9118 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9119 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9120 
9121 	new_crtc_state->freesync_config = config;
9122 	/* Copy state for access from DM IRQ handler */
9123 	acrtc->dm_irq_params.freesync_config = config;
9124 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9125 	acrtc->dm_irq_params.vrr_params = vrr_params;
9126 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9127 }
9128 
9129 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9130 					    struct dm_crtc_state *new_state)
9131 {
9132 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9133 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9134 
9135 	if (!old_vrr_active && new_vrr_active) {
9136 		/* Transition VRR inactive -> active:
9137 		 * While VRR is active, we must not disable vblank irq, as a
9138 		 * re-enable after disable would compute bogus vblank/pflip
9139 		 * timestamps if it happens inside the display front porch.
9140 		 *
9141 		 * We also need vupdate irq for the actual core vblank handling
9142 		 * at end of vblank.
9143 		 */
9144 		dm_set_vupdate_irq(new_state->base.crtc, true);
9145 		drm_crtc_vblank_get(new_state->base.crtc);
9146 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9147 				 __func__, new_state->base.crtc->base.id);
9148 	} else if (old_vrr_active && !new_vrr_active) {
9149 		/* Transition VRR active -> inactive:
9150 		 * Allow vblank irq disable again for fixed refresh rate.
9151 		 */
9152 		dm_set_vupdate_irq(new_state->base.crtc, false);
9153 		drm_crtc_vblank_put(new_state->base.crtc);
9154 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9155 				 __func__, new_state->base.crtc->base.id);
9156 	}
9157 }
9158 
9159 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9160 {
9161 	struct drm_plane *plane;
9162 	struct drm_plane_state *old_plane_state;
9163 	int i;
9164 
9165 	/*
9166 	 * TODO: Make this per-stream so we don't issue redundant updates for
9167 	 * commits with multiple streams.
9168 	 */
9169 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9170 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9171 			handle_cursor_update(plane, old_plane_state);
9172 }
9173 
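/*
 * Commit all plane updates for a single CRTC: build the dc_surface_update
 * bundle, wait for fences and the target vblank, program DC, and update the
 * PSR allow state.
 */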
9174 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9175 				    struct dc_state *dc_state,
9176 				    struct drm_device *dev,
9177 				    struct amdgpu_display_manager *dm,
9178 				    struct drm_crtc *pcrtc,
9179 				    bool wait_for_vblank)
9180 {
9181 	uint32_t i;
9182 	uint64_t timestamp_ns;
9183 	struct drm_plane *plane;
9184 	struct drm_plane_state *old_plane_state, *new_plane_state;
9185 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9186 	struct drm_crtc_state *new_pcrtc_state =
9187 			drm_atomic_get_new_crtc_state(state, pcrtc);
9188 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9189 	struct dm_crtc_state *dm_old_crtc_state =
9190 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9191 	int planes_count = 0, vpos, hpos;
9192 	long r;
9193 	unsigned long flags;
9194 	struct amdgpu_bo *abo;
9195 	uint32_t target_vblank, last_flip_vblank;
9196 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9197 	bool cursor_update = false;
9198 	bool pflip_present = false;
9199 	struct {
9200 		struct dc_surface_update surface_updates[MAX_SURFACES];
9201 		struct dc_plane_info plane_infos[MAX_SURFACES];
9202 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9203 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9204 		struct dc_stream_update stream_update;
9205 	} *bundle;
9206 
9207 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9208 
9209 	if (!bundle) {
9210 		dm_error("Failed to allocate update bundle\n");
9211 		goto cleanup;
9212 	}
9213 
9214 	/*
9215 	 * Disable the cursor first if we're disabling all the planes.
9216 	 * It'll remain on the screen after the planes are re-enabled
9217 	 * if we don't.
9218 	 */
9219 	if (acrtc_state->active_planes == 0)
9220 		amdgpu_dm_commit_cursors(state);
9221 
9222 	/* update planes when needed */
9223 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9224 		struct drm_crtc *crtc = new_plane_state->crtc;
9225 		struct drm_crtc_state *new_crtc_state;
9226 		struct drm_framebuffer *fb = new_plane_state->fb;
9227 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9228 		bool plane_needs_flip;
9229 		struct dc_plane_state *dc_plane;
9230 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9231 
9232 		/* Cursor plane is handled after stream updates */
9233 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9234 			if ((fb && crtc == pcrtc) ||
9235 			    (old_plane_state->fb && old_plane_state->crtc == pcrtc))
9236 				cursor_update = true;
9237 
9238 			continue;
9239 		}
9240 
9241 		if (!fb || !crtc || pcrtc != crtc)
9242 			continue;
9243 
9244 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9245 		if (!new_crtc_state->active)
9246 			continue;
9247 
9248 		dc_plane = dm_new_plane_state->dc_state;
9249 
9250 		bundle->surface_updates[planes_count].surface = dc_plane;
9251 		if (new_pcrtc_state->color_mgmt_changed) {
9252 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9253 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9254 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9255 		}
9256 
9257 		fill_dc_scaling_info(dm->adev, new_plane_state,
9258 				     &bundle->scaling_infos[planes_count]);
9259 
9260 		bundle->surface_updates[planes_count].scaling_info =
9261 			&bundle->scaling_infos[planes_count];
9262 
9263 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9264 
9265 		pflip_present = pflip_present || plane_needs_flip;
9266 
9267 		if (!plane_needs_flip) {
9268 			planes_count += 1;
9269 			continue;
9270 		}
9271 
9272 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9273 
9274 		/*
9275 		 * Wait for all fences on this FB. Do limited wait to avoid
9276 		 * deadlock during GPU reset when this fence will not signal
9277 		 * but we hold reservation lock for the BO.
9278 		 */
9279 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9280 					  DMA_RESV_USAGE_WRITE, false,
9281 					  msecs_to_jiffies(5000));
9282 		if (unlikely(r <= 0))
9283 			DRM_ERROR("Waiting for fences timed out!");
9284 
9285 		fill_dc_plane_info_and_addr(
9286 			dm->adev, new_plane_state,
9287 			afb->tiling_flags,
9288 			&bundle->plane_infos[planes_count],
9289 			&bundle->flip_addrs[planes_count].address,
9290 			afb->tmz_surface, false);
9291 
9292 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9293 				 new_plane_state->plane->index,
9294 				 bundle->plane_infos[planes_count].dcc.enable);
9295 
9296 		bundle->surface_updates[planes_count].plane_info =
9297 			&bundle->plane_infos[planes_count];
9298 
9299 		/*
9300 		 * Only allow immediate flips for fast updates that don't
9301 		 * change FB pitch, DCC state, rotation or mirroring.
9302 		 */
9303 		bundle->flip_addrs[planes_count].flip_immediate =
9304 			crtc->state->async_flip &&
9305 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9306 
9307 		timestamp_ns = ktime_get_ns();
9308 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9309 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9310 		bundle->surface_updates[planes_count].surface = dc_plane;
9311 
9312 		if (!bundle->surface_updates[planes_count].surface) {
9313 			DRM_ERROR("No surface for CRTC: id=%d\n",
9314 					acrtc_attach->crtc_id);
9315 			continue;
9316 		}
9317 
9318 		if (plane == pcrtc->primary)
9319 			update_freesync_state_on_stream(
9320 				dm,
9321 				acrtc_state,
9322 				acrtc_state->stream,
9323 				dc_plane,
9324 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9325 
9326 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9327 				 __func__,
9328 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9329 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9330 
9331 		planes_count += 1;
9332 
9333 	}
9334 
9335 	if (pflip_present) {
9336 		if (!vrr_active) {
9337 			/* Use old throttling in non-vrr fixed refresh rate mode
9338 			 * to keep flip scheduling based on target vblank counts
9339 			 * working in a backwards compatible way, e.g., for
9340 			 * clients using the GLX_OML_sync_control extension or
9341 			 * DRI3/Present extension with defined target_msc.
9342 			 */
9343 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9344 		} else {
9346 			/* For variable refresh rate mode only:
9347 			 * Get vblank of last completed flip to avoid > 1 vrr
9348 			 * flips per video frame by use of throttling, but allow
9349 			 * flip programming anywhere in the possibly large
9350 			 * variable vrr vblank interval for fine-grained flip
9351 			 * timing control and more opportunity to avoid stutter
9352 			 * on late submission of flips.
9353 			 */
9354 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9355 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9356 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9357 		}
9358 
9359 		target_vblank = last_flip_vblank + wait_for_vblank;
9360 
9361 		/*
9362 		 * Wait until we're out of the vertical blank period before the one
9363 		 * targeted by the flip
9364 		 */
9365 		while ((acrtc_attach->enabled &&
9366 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9367 							    0, &vpos, &hpos, NULL,
9368 							    NULL, &pcrtc->hwmode)
9369 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9370 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9371 			(int)(target_vblank -
9372 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9373 			usleep_range(1000, 1100);
9374 		}
9375 
9376 		/*
9377 		 * Prepare the flip event for the pageflip interrupt to handle.
9378 		 *
9379 		 * This only works in the case where we've already turned on the
9380 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9381 		 * from 0 -> n planes we have to skip a hardware generated event
9382 		 * and rely on sending it from software.
9383 		 */
9384 		if (acrtc_attach->base.state->event &&
9385 		    acrtc_state->active_planes > 0 &&
9386 		    !acrtc_state->force_dpms_off) {
9387 			drm_crtc_vblank_get(pcrtc);
9388 
9389 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9390 
9391 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9392 			prepare_flip_isr(acrtc_attach);
9393 
9394 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9395 		}
9396 
9397 		if (acrtc_state->stream) {
9398 			if (acrtc_state->freesync_vrr_info_changed)
9399 				bundle->stream_update.vrr_infopacket =
9400 					&acrtc_state->stream->vrr_infopacket;
9401 		}
9402 	} else if (cursor_update && acrtc_state->active_planes > 0 &&
9403 		   !acrtc_state->force_dpms_off &&
9404 		   acrtc_attach->base.state->event) {
9405 		drm_crtc_vblank_get(pcrtc);
9406 
9407 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9408 
9409 		acrtc_attach->event = acrtc_attach->base.state->event;
9410 		acrtc_attach->base.state->event = NULL;
9411 
9412 		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9413 	}
9414 
9415 	/* Update the planes if changed or disable if we don't have any. */
9416 	if ((planes_count || acrtc_state->active_planes == 0) &&
9417 		acrtc_state->stream) {
9418 		/*
9419 		 * If PSR or idle optimizations are enabled then flush out
9420 		 * any pending work before hardware programming.
9421 		 */
9422 		if (dm->vblank_control_workqueue)
9423 			flush_workqueue(dm->vblank_control_workqueue);
9424 
9425 		bundle->stream_update.stream = acrtc_state->stream;
9426 		if (new_pcrtc_state->mode_changed) {
9427 			bundle->stream_update.src = acrtc_state->stream->src;
9428 			bundle->stream_update.dst = acrtc_state->stream->dst;
9429 		}
9430 
9431 		if (new_pcrtc_state->color_mgmt_changed) {
9432 			/*
9433 			 * TODO: This isn't fully correct since we've actually
9434 			 * already modified the stream in place.
9435 			 */
9436 			bundle->stream_update.gamut_remap =
9437 				&acrtc_state->stream->gamut_remap_matrix;
9438 			bundle->stream_update.output_csc_transform =
9439 				&acrtc_state->stream->csc_color_matrix;
9440 			bundle->stream_update.out_transfer_func =
9441 				acrtc_state->stream->out_transfer_func;
9442 		}
9443 
9444 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9445 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9446 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9447 
9448 		/*
9449 		 * If FreeSync state on the stream has changed then we need to
9450 		 * re-adjust the min/max bounds now that DC doesn't handle this
9451 		 * as part of commit.
9452 		 */
9453 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9454 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9455 			dc_stream_adjust_vmin_vmax(
9456 				dm->dc, acrtc_state->stream,
9457 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9458 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9459 		}
9460 		mutex_lock(&dm->dc_lock);
9461 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9462 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9463 			amdgpu_dm_psr_disable(acrtc_state->stream);
9464 
9465 		dc_commit_updates_for_stream(dm->dc,
9466 						     bundle->surface_updates,
9467 						     planes_count,
9468 						     acrtc_state->stream,
9469 						     &bundle->stream_update,
9470 						     dc_state);
9471 
9472 		/*
9473 		 * Enable or disable the interrupts on the backend.
9474 		 *
9475 		 * Most pipes are put into power gating when unused.
9476 		 *
9477 		 * When power gating is enabled on a pipe we lose the
9478 		 * interrupt enablement state when power gating is disabled.
9479 		 *
9480 		 * So we need to update the IRQ control state in hardware
9481 		 * whenever the pipe turns on (since it could be previously
9482 		 * power gated) or off (since some pipes can't be power gated
9483 		 * on some ASICs).
9484 		 */
9485 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9486 			dm_update_pflip_irq_state(drm_to_adev(dev),
9487 						  acrtc_attach);
9488 
9489 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9490 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9491 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9492 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9493 
9494 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9495 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9496 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9497 			struct amdgpu_dm_connector *aconn =
9498 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9499 
9500 			if (aconn->psr_skip_count > 0)
9501 				aconn->psr_skip_count--;
9502 
9503 			/* Allow PSR when skip count is 0. */
9504 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9505 		} else {
9506 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9507 		}
9508 
9509 		mutex_unlock(&dm->dc_lock);
9510 	}
9511 
9512 	/*
9513 	 * Update cursor state *after* programming all the planes.
9514 	 * This avoids redundant programming in the case where we're going
9515 	 * to be disabling a single plane - those pipes are being disabled.
9516 	 */
9517 	if (acrtc_state->active_planes)
9518 		amdgpu_dm_commit_cursors(state);
9519 
9520 cleanup:
9521 	kfree(bundle);
9522 }
9523 
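/*
 * Notify the audio component of ELD changes: removals for connectors whose
 * CRTC changed and additions for connectors with newly enabled streams.
 */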
9524 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9525 				   struct drm_atomic_state *state)
9526 {
9527 	struct amdgpu_device *adev = drm_to_adev(dev);
9528 	struct amdgpu_dm_connector *aconnector;
9529 	struct drm_connector *connector;
9530 	struct drm_connector_state *old_con_state, *new_con_state;
9531 	struct drm_crtc_state *new_crtc_state;
9532 	struct dm_crtc_state *new_dm_crtc_state;
9533 	const struct dc_stream_status *status;
9534 	int i, inst;
9535 
9536 	/* Notify device removals. */
9537 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9538 		if (old_con_state->crtc != new_con_state->crtc) {
9539 			/* CRTC changes require notification. */
9540 			goto notify;
9541 		}
9542 
9543 		if (!new_con_state->crtc)
9544 			continue;
9545 
9546 		new_crtc_state = drm_atomic_get_new_crtc_state(
9547 			state, new_con_state->crtc);
9548 
9549 		if (!new_crtc_state)
9550 			continue;
9551 
9552 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9553 			continue;
9554 
9555 	notify:
9556 		aconnector = to_amdgpu_dm_connector(connector);
9557 
9558 		mutex_lock(&adev->dm.audio_lock);
9559 		inst = aconnector->audio_inst;
9560 		aconnector->audio_inst = -1;
9561 		mutex_unlock(&adev->dm.audio_lock);
9562 
9563 		amdgpu_dm_audio_eld_notify(adev, inst);
9564 	}
9565 
9566 	/* Notify audio device additions. */
9567 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9568 		if (!new_con_state->crtc)
9569 			continue;
9570 
9571 		new_crtc_state = drm_atomic_get_new_crtc_state(
9572 			state, new_con_state->crtc);
9573 
9574 		if (!new_crtc_state)
9575 			continue;
9576 
9577 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9578 			continue;
9579 
9580 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9581 		if (!new_dm_crtc_state->stream)
9582 			continue;
9583 
9584 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9585 		if (!status)
9586 			continue;
9587 
9588 		aconnector = to_amdgpu_dm_connector(connector);
9589 
9590 		mutex_lock(&adev->dm.audio_lock);
9591 		inst = status->audio_inst;
9592 		aconnector->audio_inst = inst;
9593 		mutex_unlock(&adev->dm.audio_lock);
9594 
9595 		amdgpu_dm_audio_eld_notify(adev, inst);
9596 	}
9597 }
9598 
9599 /*
9600  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9601  * @crtc_state: the DRM CRTC state
9602  * @stream_state: the DC stream state.
9603  *
9604  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9605  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9606  */
9607 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9608 						struct dc_stream_state *stream_state)
9609 {
9610 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9611 }
9612 
9613 /**
9614  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9615  * @state: The atomic state to commit
9616  *
9617  * This will tell DC to commit the constructed DC state from atomic_check,
9618  * programming the hardware. Any failures here implies a hardware failure, since
9619  * atomic check should have filtered anything non-kosher.
9620  */
9621 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9622 {
9623 	struct drm_device *dev = state->dev;
9624 	struct amdgpu_device *adev = drm_to_adev(dev);
9625 	struct amdgpu_display_manager *dm = &adev->dm;
9626 	struct dm_atomic_state *dm_state;
9627 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9628 	uint32_t i, j;
9629 	struct drm_crtc *crtc;
9630 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9631 	unsigned long flags;
9632 	bool wait_for_vblank = true;
9633 	struct drm_connector *connector;
9634 	struct drm_connector_state *old_con_state, *new_con_state;
9635 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9636 	int crtc_disable_count = 0;
9637 	bool mode_set_reset_required = false;
9638 
9639 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9640 
9641 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9642 
9643 	dm_state = dm_atomic_get_new_state(state);
9644 	if (dm_state && dm_state->context) {
9645 		dc_state = dm_state->context;
9646 	} else {
9647 		/* No state changes, retain current state. */
9648 		dc_state_temp = dc_create_state(dm->dc);
9649 		ASSERT(dc_state_temp);
9650 		dc_state = dc_state_temp;
9651 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9652 	}
9653 
9654 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9655 				       new_crtc_state, i) {
9656 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9657 
9658 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9659 
9660 		if (old_crtc_state->active &&
9661 		    (!new_crtc_state->active ||
9662 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9663 			manage_dm_interrupts(adev, acrtc, false);
9664 			dc_stream_release(dm_old_crtc_state->stream);
9665 		}
9666 	}
9667 
9668 	drm_atomic_helper_calc_timestamping_constants(state);
9669 
9670 	/* update changed items */
9671 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9672 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9673 
9674 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9675 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9676 
9677 		drm_dbg_state(state->dev,
9678 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9679 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9680 			"connectors_changed:%d\n",
9681 			acrtc->crtc_id,
9682 			new_crtc_state->enable,
9683 			new_crtc_state->active,
9684 			new_crtc_state->planes_changed,
9685 			new_crtc_state->mode_changed,
9686 			new_crtc_state->active_changed,
9687 			new_crtc_state->connectors_changed);
9688 
9689 		/* Disable cursor if disabling crtc */
9690 		if (old_crtc_state->active && !new_crtc_state->active) {
9691 			struct dc_cursor_position position;
9692 
9693 			memset(&position, 0, sizeof(position));
9694 			mutex_lock(&dm->dc_lock);
9695 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9696 			mutex_unlock(&dm->dc_lock);
9697 		}
9698 
9699 		/* Copy all transient state flags into dc state */
9700 		if (dm_new_crtc_state->stream) {
9701 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9702 							    dm_new_crtc_state->stream);
9703 		}
9704 
9705 		/* handles headless hotplug case, updating new_state and
9706 		 * aconnector as needed
9707 		 */
9708 
9709 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9710 
9711 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9712 
9713 			if (!dm_new_crtc_state->stream) {
9714 				/*
9715 				 * This could happen because of issues with
9716 				 * delivery of userspace notifications.
9717 				 * In this case userspace tries to set a mode
9718 				 * on a display that is in fact disconnected,
9719 				 * so dc_sink is NULL on the aconnector.
9720 				 * We expect a mode reset to come soon.
9721 				 *
9722 				 * This can also happen when an unplug occurs
9723 				 * during the resume sequence.
9724 				 *
9725 				 * In this case, we want to pretend we still
9726 				 * have a sink to keep the pipe running so that
9727 				 * hw state is consistent with the sw state.
9728 				 */
9729 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9730 						__func__, acrtc->base.base.id);
9731 				continue;
9732 			}
9733 
9734 			if (dm_old_crtc_state->stream)
9735 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9736 
9737 			pm_runtime_get_noresume(dev->dev);
9738 
9739 			acrtc->enabled = true;
9740 			acrtc->hw_mode = new_crtc_state->mode;
9741 			crtc->hwmode = new_crtc_state->mode;
9742 			mode_set_reset_required = true;
9743 		} else if (modereset_required(new_crtc_state)) {
9744 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9745 			/* i.e. reset mode */
9746 			if (dm_old_crtc_state->stream)
9747 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9748 
9749 			mode_set_reset_required = true;
9750 		}
9751 	} /* for_each_crtc_in_state() */
9752 
9753 	if (dc_state) {
9754 		/* if there is a mode set or reset, disable eDP PSR */
9755 		if (mode_set_reset_required) {
9756 			if (dm->vblank_control_workqueue)
9757 				flush_workqueue(dm->vblank_control_workqueue);
9758 
9759 			amdgpu_dm_psr_disable_all(dm);
9760 		}
9761 
9762 		dm_enable_per_frame_crtc_master_sync(dc_state);
9763 		mutex_lock(&dm->dc_lock);
9764 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9765 
9766 		/* Allow idle optimization when vblank count is 0 for display off */
9767 		if (dm->active_vblank_irq_count == 0)
9768 			dc_allow_idle_optimizations(dm->dc, true);
9769 		mutex_unlock(&dm->dc_lock);
9770 	}
9771 
9772 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9773 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9774 
9775 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9776 
9777 		if (dm_new_crtc_state->stream != NULL) {
9778 			const struct dc_stream_status *status =
9779 					dc_stream_get_status(dm_new_crtc_state->stream);
9780 
9781 			if (!status)
9782 				status = dc_stream_get_status_from_state(dc_state,
9783 									 dm_new_crtc_state->stream);
9784 			if (!status)
9785 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9786 			else
9787 				acrtc->otg_inst = status->primary_otg_inst;
9788 		}
9789 	}
9790 #ifdef CONFIG_DRM_AMD_DC_HDCP
9791 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9792 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9793 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9794 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9795 
9796 		new_crtc_state = NULL;
9797 
9798 		if (acrtc)
9799 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9800 
9801 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9802 
9803 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9804 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9805 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9806 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9807 			dm_new_con_state->update_hdcp = true;
9808 			continue;
9809 		}
9810 
9811 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9812 			hdcp_update_display(
9813 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9814 				new_con_state->hdcp_content_type,
9815 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9816 	}
9817 #endif
9818 
9819 	/* Handle connector state changes */
9820 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9821 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9822 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9823 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9824 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9825 		struct dc_stream_update stream_update;
9826 		struct dc_info_packet hdr_packet;
9827 		struct dc_stream_status *status = NULL;
9828 		bool abm_changed, hdr_changed, scaling_changed;
9829 
9830 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9831 		memset(&stream_update, 0, sizeof(stream_update));
9832 
9833 		if (acrtc) {
9834 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9835 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9836 		}
9837 
9838 		/* Skip any modesets/resets */
9839 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9840 			continue;
9841 
9842 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9843 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9844 
9845 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9846 							     dm_old_con_state);
9847 
9848 		abm_changed = dm_new_crtc_state->abm_level !=
9849 			      dm_old_crtc_state->abm_level;
9850 
9851 		hdr_changed =
9852 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9853 
9854 		if (!scaling_changed && !abm_changed && !hdr_changed)
9855 			continue;
9856 
9857 		stream_update.stream = dm_new_crtc_state->stream;
9858 		if (scaling_changed) {
9859 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9860 					dm_new_con_state, dm_new_crtc_state->stream);
9861 
9862 			stream_update.src = dm_new_crtc_state->stream->src;
9863 			stream_update.dst = dm_new_crtc_state->stream->dst;
9864 		}
9865 
9866 		if (abm_changed) {
9867 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9868 
9869 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9870 		}
9871 
9872 		if (hdr_changed) {
9873 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9874 			stream_update.hdr_static_metadata = &hdr_packet;
9875 		}
9876 
9877 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9878 
9879 		if (WARN_ON(!status))
9880 			continue;
9881 
9882 		WARN_ON(!status->plane_count);
9883 
9884 		/*
9885 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9886 		 * Here we create an empty update on each plane.
9887 		 * To fix this, DC should permit updating only stream properties.
9888 		 */
9889 		for (j = 0; j < status->plane_count; j++)
9890 			dummy_updates[j].surface = status->plane_states[0];
9891 
9892 
9893 		mutex_lock(&dm->dc_lock);
9894 		dc_commit_updates_for_stream(dm->dc,
9895 						     dummy_updates,
9896 						     status->plane_count,
9897 						     dm_new_crtc_state->stream,
9898 						     &stream_update,
9899 						     dc_state);
9900 		mutex_unlock(&dm->dc_lock);
9901 	}
9902 
9903 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9904 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9905 				      new_crtc_state, i) {
9906 		if (old_crtc_state->active && !new_crtc_state->active)
9907 			crtc_disable_count++;
9908 
9909 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9910 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9911 
9912 		/* For freesync config update on crtc state and params for irq */
9913 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9914 
9915 		/* Handle vrr on->off / off->on transitions */
9916 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9917 						dm_new_crtc_state);
9918 	}
9919 
9920 	/*
9921 	 * Enable interrupts for CRTCs that are newly enabled or went through
9922 	 * a modeset. This is intentionally deferred until after the front end
9923 	 * state has been modified, so the OTG is on and the IRQ handlers
9924 	 * don't access stale or invalid state.
9925 	 */
9926 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9927 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9928 #ifdef CONFIG_DEBUG_FS
9929 		bool configure_crc = false;
9930 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9931 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9932 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9933 #endif
9934 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9935 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9936 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9937 #endif
9938 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9939 
9940 		if (new_crtc_state->active &&
9941 		    (!old_crtc_state->active ||
9942 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9943 			dc_stream_retain(dm_new_crtc_state->stream);
9944 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9945 			manage_dm_interrupts(adev, acrtc, true);
9946 
9947 #ifdef CONFIG_DEBUG_FS
9948 			/*
9949 			 * Frontend may have changed so reapply the CRC capture
9950 			 * settings for the stream.
9951 			 */
9952 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9953 
9954 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9955 				configure_crc = true;
9956 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9957 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9958 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9959 					acrtc->dm_irq_params.crc_window.update_win = true;
9960 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9961 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9962 					crc_rd_wrk->crtc = crtc;
9963 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9964 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9965 				}
9966 #endif
9967 			}
9968 
9969 			if (configure_crc)
9970 				if (amdgpu_dm_crtc_configure_crc_source(
9971 					crtc, dm_new_crtc_state, cur_crc_src))
9972 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9973 #endif
9974 		}
9975 	}
9976 
9977 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9978 		if (new_crtc_state->async_flip)
9979 			wait_for_vblank = false;
9980 
9981 	/* update planes when needed per crtc */
9982 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9983 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9984 
9985 		if (dm_new_crtc_state->stream)
9986 			amdgpu_dm_commit_planes(state, dc_state, dev,
9987 						dm, crtc, wait_for_vblank);
9988 	}
9989 
9990 	/* Update audio instances for each connector. */
9991 	amdgpu_dm_commit_audio(dev, state);
9992 
9993 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9994 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9995 	/* restore the backlight level */
9996 	for (i = 0; i < dm->num_of_edps; i++) {
9997 		if (dm->backlight_dev[i] &&
9998 		    (dm->actual_brightness[i] != dm->brightness[i]))
9999 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10000 	}
10001 #endif
10002 	/*
10003 	 * Send a vblank event for every CRTC event not already handled in the
10004 	 * flip path, and mark it consumed for drm_atomic_helper_commit_hw_done
10005 	 */
10006 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10007 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10008 
10009 		if (new_crtc_state->event)
10010 			drm_send_event_locked(dev, &new_crtc_state->event->base);
10011 
10012 		new_crtc_state->event = NULL;
10013 	}
10014 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10015 
10016 	/* Signal HW programming completion */
10017 	drm_atomic_helper_commit_hw_done(state);
10018 
10019 	if (wait_for_vblank)
10020 		drm_atomic_helper_wait_for_flip_done(dev, state);
10021 
10022 	drm_atomic_helper_cleanup_planes(dev, state);
10023 
10024 	/* return the stolen vga memory back to VRAM */
10025 	if (!adev->mman.keep_stolen_vga_memory)
10026 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10027 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10028 
10029 	/*
10030 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10031 	 * so we can put the GPU into runtime suspend if we're not driving any
10032 	 * displays anymore
10033 	 */
10034 	for (i = 0; i < crtc_disable_count; i++)
10035 		pm_runtime_put_autosuspend(dev->dev);
10036 	pm_runtime_mark_last_busy(dev->dev);
10037 
10038 	if (dc_state_temp)
10039 		dc_release_state(dc_state_temp);
10040 }
10041 
10042 
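/*
 * Build a minimal atomic state containing only the given connector, the
 * CRTC currently driving it and that CRTC's primary plane, mark the CRTC
 * state as mode_changed to force a full restore, and commit it. Used to
 * bring a display back up when userspace cannot be relied on to do so.
 */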
10043 static int dm_force_atomic_commit(struct drm_connector *connector)
10044 {
10045 	int ret = 0;
10046 	struct drm_device *ddev = connector->dev;
10047 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10048 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10049 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10050 	struct drm_connector_state *conn_state;
10051 	struct drm_crtc_state *crtc_state;
10052 	struct drm_plane_state *plane_state;
10053 
10054 	if (!state)
10055 		return -ENOMEM;
10056 
10057 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10058 
10059 	/* Construct an atomic state to restore previous display setting */
10060 
10061 	/*
10062 	 * Attach connectors to drm_atomic_state
10063 	 */
10064 	conn_state = drm_atomic_get_connector_state(state, connector);
10065 
10066 	ret = PTR_ERR_OR_ZERO(conn_state);
10067 	if (ret)
10068 		goto out;
10069 
10070 	/* Attach crtc to drm_atomic_state */
10071 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10072 
10073 	ret = PTR_ERR_OR_ZERO(crtc_state);
10074 	if (ret)
10075 		goto out;
10076 
10077 	/* force a restore */
10078 	crtc_state->mode_changed = true;
10079 
10080 	/* Attach plane to drm_atomic_state */
10081 	plane_state = drm_atomic_get_plane_state(state, plane);
10082 
10083 	ret = PTR_ERR_OR_ZERO(plane_state);
10084 	if (ret)
10085 		goto out;
10086 
10087 	/* Call commit internally with the state we just constructed */
10088 	ret = drm_atomic_commit(state);
10089 
10090 out:
10091 	drm_atomic_state_put(state);
10092 	if (ret)
10093 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10094 
10095 	return ret;
10096 }
10097 
10098 /*
10099  * This function handles all cases when a set mode does not come upon hotplug.
10100  * This includes when a display is unplugged and then plugged back into the
10101  * same port, and when running without usermode desktop manager support.
10102  */
10103 void dm_restore_drm_connector_state(struct drm_device *dev,
10104 				    struct drm_connector *connector)
10105 {
10106 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10107 	struct amdgpu_crtc *disconnected_acrtc;
10108 	struct dm_crtc_state *acrtc_state;
10109 
10110 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10111 		return;
10112 
10113 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10114 	if (!disconnected_acrtc)
10115 		return;
10116 
10117 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10118 	if (!acrtc_state->stream)
10119 		return;
10120 
10121 	/*
10122 	 * If the previous sink is not released and different from the current,
10123 	 * we deduce we are in a state where we can not rely on usermode call
10124 	 * to turn on the display, so we do it here
10125 	 */
10126 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10127 		dm_force_atomic_commit(&aconnector->base);
10128 }
10129 
10130 /*
10131  * Grabs all modesetting locks to serialize against any blocking commits,
10132  * and waits for completion of all non-blocking commits.
10133  */
10134 static int do_aquire_global_lock(struct drm_device *dev,
10135 				 struct drm_atomic_state *state)
10136 {
10137 	struct drm_crtc *crtc;
10138 	struct drm_crtc_commit *commit;
10139 	long ret;
10140 
10141 	/*
10142 	 * Adding all modeset locks to acquire_ctx ensures that when the
10143 	 * framework releases it, the extra locks we are taking here will
10144 	 * also get released.
10145 	 */
10146 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10147 	if (ret)
10148 		return ret;
10149 
10150 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10151 		spin_lock(&crtc->commit_lock);
10152 		commit = list_first_entry_or_null(&crtc->commit_list,
10153 				struct drm_crtc_commit, commit_entry);
10154 		if (commit)
10155 			drm_crtc_commit_get(commit);
10156 		spin_unlock(&crtc->commit_lock);
10157 
10158 		if (!commit)
10159 			continue;
10160 
10161 		/*
10162 		 * Make sure all pending HW programming completed and
10163 		 * page flips done
10164 		 */
10165 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10166 
10167 		if (ret > 0)
10168 			ret = wait_for_completion_interruptible_timeout(
10169 					&commit->flip_done, 10*HZ);
10170 
10171 		if (ret == 0)
10172 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10173 				  crtc->base.id, crtc->name);
10174 
10175 		drm_crtc_commit_put(commit);
10176 	}
10177 
10178 	return ret < 0 ? ret : 0;
10179 }
10180 
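/*
 * Derive the mod_freesync_config for a CRTC from its connector state.
 * VRR is considered supported when the connector reports freesync
 * capability and the mode's refresh rate falls within the panel's
 * [min_vfreq, max_vfreq] range; the limits are converted from Hz to uHz
 * for DC. A previously selected freesync video mode keeps the
 * VRR_STATE_ACTIVE_FIXED state and its fixed refresh rate.
 */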
10181 static void get_freesync_config_for_crtc(
10182 	struct dm_crtc_state *new_crtc_state,
10183 	struct dm_connector_state *new_con_state)
10184 {
10185 	struct mod_freesync_config config = {0};
10186 	struct amdgpu_dm_connector *aconnector =
10187 			to_amdgpu_dm_connector(new_con_state->base.connector);
10188 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10189 	int vrefresh = drm_mode_vrefresh(mode);
10190 	bool fs_vid_mode = false;
10191 
10192 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10193 					vrefresh >= aconnector->min_vfreq &&
10194 					vrefresh <= aconnector->max_vfreq;
10195 
10196 	if (new_crtc_state->vrr_supported) {
10197 		new_crtc_state->stream->ignore_msa_timing_param = true;
10198 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10199 
10200 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10201 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10202 		config.vsif_supported = true;
10203 		config.btr = true;
10204 
10205 		if (fs_vid_mode) {
10206 			config.state = VRR_STATE_ACTIVE_FIXED;
10207 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10208 			goto out;
10209 		} else if (new_crtc_state->base.vrr_enabled) {
10210 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10211 		} else {
10212 			config.state = VRR_STATE_INACTIVE;
10213 		}
10214 	}
10215 out:
10216 	new_crtc_state->freesync_config = config;
10217 }
10218 
10219 static void reset_freesync_config_for_crtc(
10220 	struct dm_crtc_state *new_crtc_state)
10221 {
10222 	new_crtc_state->vrr_supported = false;
10223 
10224 	memset(&new_crtc_state->vrr_infopacket, 0,
10225 	       sizeof(new_crtc_state->vrr_infopacket));
10226 }
10227 
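/*
 * Return true when the old and new modes differ only in the vertical
 * front porch: vtotal, vsync_start and vsync_end change while all
 * horizontal parameters and the vsync pulse width stay the same. Such a
 * change (typical when switching between freesync video modes) can be
 * applied without a full modeset.
 */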
10228 static bool
10229 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10230 				 struct drm_crtc_state *new_crtc_state)
10231 {
10232 	const struct drm_display_mode *old_mode, *new_mode;
10233 
10234 	if (!old_crtc_state || !new_crtc_state)
10235 		return false;
10236 
10237 	old_mode = &old_crtc_state->mode;
10238 	new_mode = &new_crtc_state->mode;
10239 
10240 	if (old_mode->clock       == new_mode->clock &&
10241 	    old_mode->hdisplay    == new_mode->hdisplay &&
10242 	    old_mode->vdisplay    == new_mode->vdisplay &&
10243 	    old_mode->htotal      == new_mode->htotal &&
10244 	    old_mode->vtotal      != new_mode->vtotal &&
10245 	    old_mode->hsync_start == new_mode->hsync_start &&
10246 	    old_mode->vsync_start != new_mode->vsync_start &&
10247 	    old_mode->hsync_end   == new_mode->hsync_end &&
10248 	    old_mode->vsync_end   != new_mode->vsync_end &&
10249 	    old_mode->hskew       == new_mode->hskew &&
10250 	    old_mode->vscan       == new_mode->vscan &&
10251 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10252 	    (new_mode->vsync_end - new_mode->vsync_start))
10253 		return true;
10254 
10255 	return false;
10256 }
10257 
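/*
 * Compute the fixed refresh rate in uHz from the mode timing:
 * fixed_refresh_in_uhz = (clock_in_khz * 1000 * 1000000) / (htotal * vtotal).
 * For example, a 148500 kHz pixel clock with htotal 2200 and vtotal 1125
 * gives 60000000 uHz, i.e. 60 Hz.
 */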
10258 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10259 	uint64_t num, den, res;
10260 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10261 
10262 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10263 
10264 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10265 	den = (unsigned long long)new_crtc_state->mode.htotal *
10266 	      (unsigned long long)new_crtc_state->mode.vtotal;
10267 
10268 	res = div_u64(num, den);
10269 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10270 }
10271 
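/*
 * Validate and apply the CRTC portion of the atomic state against the DC
 * context. With enable == false the old stream is removed from the DC
 * context for CRTCs being disabled or reset; with enable == true a new
 * stream is created and validated for the sink and added to the context.
 * Stream updates that do not need a full modeset (scaling, ABM, color
 * management, freesync config) are handled at the end.
 * *lock_and_validation_needed is set whenever global DC validation will
 * be required at the end of atomic check.
 */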
10272 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10273 			 struct drm_atomic_state *state,
10274 			 struct drm_crtc *crtc,
10275 			 struct drm_crtc_state *old_crtc_state,
10276 			 struct drm_crtc_state *new_crtc_state,
10277 			 bool enable,
10278 			 bool *lock_and_validation_needed)
10279 {
10280 	struct dm_atomic_state *dm_state = NULL;
10281 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10282 	struct dc_stream_state *new_stream;
10283 	int ret = 0;
10284 
10285 	/*
10286 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10287 	 * update changed items
10288 	 */
10289 	struct amdgpu_crtc *acrtc = NULL;
10290 	struct amdgpu_dm_connector *aconnector = NULL;
10291 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10292 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10293 
10294 	new_stream = NULL;
10295 
10296 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10297 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10298 	acrtc = to_amdgpu_crtc(crtc);
10299 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10300 
10301 	/* TODO This hack should go away */
10302 	if (aconnector && enable) {
10303 		/* Make sure fake sink is created in plug-in scenario */
10304 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10305 							    &aconnector->base);
10306 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10307 							    &aconnector->base);
10308 
10309 		if (IS_ERR(drm_new_conn_state)) {
10310 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10311 			goto fail;
10312 		}
10313 
10314 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10315 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10316 
10317 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10318 			goto skip_modeset;
10319 
10320 		new_stream = create_validate_stream_for_sink(aconnector,
10321 							     &new_crtc_state->mode,
10322 							     dm_new_conn_state,
10323 							     dm_old_crtc_state->stream);
10324 
10325 		/*
10326 		 * We can have no stream on ACTION_SET if a display
10327 		 * was disconnected during S3. In this case it is not an
10328 		 * error; the OS will be updated after detection and
10329 		 * will do the right thing on the next atomic commit.
10330 		 */
10331 
10332 		if (!new_stream) {
10333 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10334 					__func__, acrtc->base.base.id);
10335 			ret = -ENOMEM;
10336 			goto fail;
10337 		}
10338 
10339 		/*
10340 		 * TODO: Check VSDB bits to decide whether this should
10341 		 * be enabled or not.
10342 		 */
10343 		new_stream->triggered_crtc_reset.enabled =
10344 			dm->force_timing_sync;
10345 
10346 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10347 
10348 		ret = fill_hdr_info_packet(drm_new_conn_state,
10349 					   &new_stream->hdr_static_metadata);
10350 		if (ret)
10351 			goto fail;
10352 
10353 		/*
10354 		 * If we already removed the old stream from the context
10355 		 * (and set the new stream to NULL) then we can't reuse
10356 		 * the old stream even if the stream and scaling are unchanged.
10357 		 * We'll hit the BUG_ON and black screen.
10358 		 *
10359 		 * TODO: Refactor this function to allow this check to work
10360 		 * in all conditions.
10361 		 */
10362 		if (dm_new_crtc_state->stream &&
10363 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10364 			goto skip_modeset;
10365 
10366 		if (dm_new_crtc_state->stream &&
10367 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10368 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10369 			new_crtc_state->mode_changed = false;
10370 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10371 					 new_crtc_state->mode_changed);
10372 		}
10373 	}
10374 
10375 	/* mode_changed flag may get updated above, need to check again */
10376 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10377 		goto skip_modeset;
10378 
10379 	drm_dbg_state(state->dev,
10380 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10381 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10382 		"connectors_changed:%d\n",
10383 		acrtc->crtc_id,
10384 		new_crtc_state->enable,
10385 		new_crtc_state->active,
10386 		new_crtc_state->planes_changed,
10387 		new_crtc_state->mode_changed,
10388 		new_crtc_state->active_changed,
10389 		new_crtc_state->connectors_changed);
10390 
10391 	/* Remove stream for any changed/disabled CRTC */
10392 	if (!enable) {
10393 
10394 		if (!dm_old_crtc_state->stream)
10395 			goto skip_modeset;
10396 
10397 		if (dm_new_crtc_state->stream &&
10398 		    is_timing_unchanged_for_freesync(new_crtc_state,
10399 						     old_crtc_state)) {
10400 			new_crtc_state->mode_changed = false;
10401 			DRM_DEBUG_DRIVER(
10402 				"Mode change not required for front porch change, "
10403 				"setting mode_changed to %d",
10404 				new_crtc_state->mode_changed);
10405 
10406 			set_freesync_fixed_config(dm_new_crtc_state);
10407 
10408 			goto skip_modeset;
10409 		} else if (aconnector &&
10410 			   is_freesync_video_mode(&new_crtc_state->mode,
10411 						  aconnector)) {
10412 			struct drm_display_mode *high_mode;
10413 
10414 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10415 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10416 				set_freesync_fixed_config(dm_new_crtc_state);
10417 			}
10418 		}
10419 
10420 		ret = dm_atomic_get_state(state, &dm_state);
10421 		if (ret)
10422 			goto fail;
10423 
10424 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10425 				crtc->base.id);
10426 
10427 		/* i.e. reset mode */
10428 		if (dc_remove_stream_from_ctx(
10429 				dm->dc,
10430 				dm_state->context,
10431 				dm_old_crtc_state->stream) != DC_OK) {
10432 			ret = -EINVAL;
10433 			goto fail;
10434 		}
10435 
10436 		dc_stream_release(dm_old_crtc_state->stream);
10437 		dm_new_crtc_state->stream = NULL;
10438 
10439 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10440 
10441 		*lock_and_validation_needed = true;
10442 
10443 	} else {/* Add stream for any updated/enabled CRTC */
10444 		/*
10445 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
10446 		 * newly added MST connectors are not found in the existing crtc_state
10447 		 * in chained mode. TODO: need to dig out the root cause of that.
10448 		 */
10449 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10450 			goto skip_modeset;
10451 
10452 		if (modereset_required(new_crtc_state))
10453 			goto skip_modeset;
10454 
10455 		if (modeset_required(new_crtc_state, new_stream,
10456 				     dm_old_crtc_state->stream)) {
10457 
10458 			WARN_ON(dm_new_crtc_state->stream);
10459 
10460 			ret = dm_atomic_get_state(state, &dm_state);
10461 			if (ret)
10462 				goto fail;
10463 
10464 			dm_new_crtc_state->stream = new_stream;
10465 
10466 			dc_stream_retain(new_stream);
10467 
10468 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10469 					 crtc->base.id);
10470 
10471 			if (dc_add_stream_to_ctx(
10472 					dm->dc,
10473 					dm_state->context,
10474 					dm_new_crtc_state->stream) != DC_OK) {
10475 				ret = -EINVAL;
10476 				goto fail;
10477 			}
10478 
10479 			*lock_and_validation_needed = true;
10480 		}
10481 	}
10482 
10483 skip_modeset:
10484 	/* Release extra reference */
10485 	if (new_stream)
10486 		 dc_stream_release(new_stream);
10487 
10488 	/*
10489 	 * We want to do dc stream updates that do not require a
10490 	 * full modeset below.
10491 	 */
10492 	if (!(enable && aconnector && new_crtc_state->active))
10493 		return 0;
10494 	/*
10495 	 * Given above conditions, the dc state cannot be NULL because:
10496 	 * 1. We're in the process of enabling CRTCs (the stream has just been
10497 	 *    added to the dc context, or is already in the context)
10498 	 * 2. Has a valid connector attached, and
10499 	 * 3. Is currently active and enabled.
10500 	 * => The dc stream state currently exists.
10501 	 */
10502 	BUG_ON(dm_new_crtc_state->stream == NULL);
10503 
10504 	/* Scaling or underscan settings */
10505 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10506 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10507 		update_stream_scaling_settings(
10508 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10509 
10510 	/* ABM settings */
10511 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10512 
10513 	/*
10514 	 * Color management settings. We also update color properties
10515 	 * when a modeset is needed, to ensure it gets reprogrammed.
10516 	 */
10517 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10518 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10519 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10520 		if (ret)
10521 			goto fail;
10522 	}
10523 
10524 	/* Update Freesync settings. */
10525 	get_freesync_config_for_crtc(dm_new_crtc_state,
10526 				     dm_new_conn_state);
10527 
10528 	return ret;
10529 
10530 fail:
10531 	if (new_stream)
10532 		dc_stream_release(new_stream);
10533 	return ret;
10534 }
10535 
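/*
 * Decide whether a plane update requires removing and re-adding all
 * planes on the stream (a full plane reset). This is the case on any
 * modeset or CRTC color management change, and when any non-cursor plane
 * on the same CRTC changes its CRTC assignment, size, scaling, rotation,
 * blending, alpha, colorspace, pixel format, tiling flags or modifier.
 */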
10536 static bool should_reset_plane(struct drm_atomic_state *state,
10537 			       struct drm_plane *plane,
10538 			       struct drm_plane_state *old_plane_state,
10539 			       struct drm_plane_state *new_plane_state)
10540 {
10541 	struct drm_plane *other;
10542 	struct drm_plane_state *old_other_state, *new_other_state;
10543 	struct drm_crtc_state *new_crtc_state;
10544 	int i;
10545 
10546 	/*
10547 	 * TODO: Remove this hack once the checks below are sufficient
10548 	 * enough to determine when we need to reset all the planes on
10549 	 * the stream.
10550 	 */
10551 	if (state->allow_modeset)
10552 		return true;
10553 
10554 	/* Exit early if we know that we're adding or removing the plane. */
10555 	if (old_plane_state->crtc != new_plane_state->crtc)
10556 		return true;
10557 
10558 	/* old crtc == new_crtc == NULL, plane not in context. */
10559 	if (!new_plane_state->crtc)
10560 		return false;
10561 
10562 	new_crtc_state =
10563 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10564 
10565 	if (!new_crtc_state)
10566 		return true;
10567 
10568 	/* CRTC Degamma changes currently require us to recreate planes. */
10569 	if (new_crtc_state->color_mgmt_changed)
10570 		return true;
10571 
10572 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10573 		return true;
10574 
10575 	/*
10576 	 * If there are any new primary or overlay planes being added or
10577 	 * removed then the z-order can potentially change. To ensure
10578 	 * correct z-order and pipe acquisition the current DC architecture
10579 	 * requires us to remove and recreate all existing planes.
10580 	 *
10581 	 * TODO: Come up with a more elegant solution for this.
10582 	 */
10583 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10584 		struct amdgpu_framebuffer *old_afb, *new_afb;
10585 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10586 			continue;
10587 
10588 		if (old_other_state->crtc != new_plane_state->crtc &&
10589 		    new_other_state->crtc != new_plane_state->crtc)
10590 			continue;
10591 
10592 		if (old_other_state->crtc != new_other_state->crtc)
10593 			return true;
10594 
10595 		/* Src/dst size and scaling updates. */
10596 		if (old_other_state->src_w != new_other_state->src_w ||
10597 		    old_other_state->src_h != new_other_state->src_h ||
10598 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10599 		    old_other_state->crtc_h != new_other_state->crtc_h)
10600 			return true;
10601 
10602 		/* Rotation / mirroring updates. */
10603 		if (old_other_state->rotation != new_other_state->rotation)
10604 			return true;
10605 
10606 		/* Blending updates. */
10607 		if (old_other_state->pixel_blend_mode !=
10608 		    new_other_state->pixel_blend_mode)
10609 			return true;
10610 
10611 		/* Alpha updates. */
10612 		if (old_other_state->alpha != new_other_state->alpha)
10613 			return true;
10614 
10615 		/* Colorspace changes. */
10616 		if (old_other_state->color_range != new_other_state->color_range ||
10617 		    old_other_state->color_encoding != new_other_state->color_encoding)
10618 			return true;
10619 
10620 		/* Framebuffer checks fall at the end. */
10621 		if (!old_other_state->fb || !new_other_state->fb)
10622 			continue;
10623 
10624 		/* Pixel format changes can require bandwidth updates. */
10625 		if (old_other_state->fb->format != new_other_state->fb->format)
10626 			return true;
10627 
10628 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10629 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10630 
10631 		/* Tiling and DCC changes also require bandwidth updates. */
10632 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10633 		    old_afb->base.modifier != new_afb->base.modifier)
10634 			return true;
10635 	}
10636 
10637 	return false;
10638 }
10639 
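/*
 * Validate a framebuffer attached to the cursor plane: it must not exceed
 * the hardware cursor dimensions, must not be cropped, must have a pitch
 * equal to its width and one of 64/128/256 pixels, and must use a linear
 * layout when no format modifier is supplied.
 */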
10640 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10641 			      struct drm_plane_state *new_plane_state,
10642 			      struct drm_framebuffer *fb)
10643 {
10644 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10645 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10646 	unsigned int pitch;
10647 	bool linear;
10648 
10649 	if (fb->width > new_acrtc->max_cursor_width ||
10650 	    fb->height > new_acrtc->max_cursor_height) {
10651 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10652 				 new_plane_state->fb->width,
10653 				 new_plane_state->fb->height);
10654 		return -EINVAL;
10655 	}
10656 	if (new_plane_state->src_w != fb->width << 16 ||
10657 	    new_plane_state->src_h != fb->height << 16) {
10658 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10659 		return -EINVAL;
10660 	}
10661 
10662 	/* Pitch in pixels */
10663 	pitch = fb->pitches[0] / fb->format->cpp[0];
10664 
10665 	if (fb->width != pitch) {
10666 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10667 				 fb->width, pitch);
10668 		return -EINVAL;
10669 	}
10670 
10671 	switch (pitch) {
10672 	case 64:
10673 	case 128:
10674 	case 256:
10675 		/* FB pitch is supported by cursor plane */
10676 		break;
10677 	default:
10678 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10679 		return -EINVAL;
10680 	}
10681 
10682 	/* Core DRM takes care of checking FB modifiers, so we only need to
10683 	 * check tiling flags when the FB doesn't have a modifier. */
10684 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10685 		if (adev->family < AMDGPU_FAMILY_AI) {
10686 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10687 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10688 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10689 		} else {
10690 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10691 		}
10692 		if (!linear) {
10693 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10694 			return -EINVAL;
10695 		}
10696 	}
10697 
10698 	return 0;
10699 }
10700 
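/*
 * Plane counterpart of dm_update_crtc_state(): with enable == false the
 * dc_plane_state is removed from the DC context for planes that changed
 * or were disabled; with enable == true a new dc_plane_state is created,
 * filled from the DRM plane state and added to the stream's context.
 * Cursor planes are only sanity-checked here and never get a
 * dc_plane_state of their own.
 */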
10701 static int dm_update_plane_state(struct dc *dc,
10702 				 struct drm_atomic_state *state,
10703 				 struct drm_plane *plane,
10704 				 struct drm_plane_state *old_plane_state,
10705 				 struct drm_plane_state *new_plane_state,
10706 				 bool enable,
10707 				 bool *lock_and_validation_needed)
10708 {
10709 
10710 	struct dm_atomic_state *dm_state = NULL;
10711 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10712 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10713 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10714 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10715 	struct amdgpu_crtc *new_acrtc;
10716 	bool needs_reset;
10717 	int ret = 0;
10718 
10719 
10720 	new_plane_crtc = new_plane_state->crtc;
10721 	old_plane_crtc = old_plane_state->crtc;
10722 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10723 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10724 
10725 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10726 		if (!enable || !new_plane_crtc ||
10727 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10728 			return 0;
10729 
10730 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10731 
10732 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10733 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10734 			return -EINVAL;
10735 		}
10736 
10737 		if (new_plane_state->fb) {
10738 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10739 						 new_plane_state->fb);
10740 			if (ret)
10741 				return ret;
10742 		}
10743 
10744 		return 0;
10745 	}
10746 
10747 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10748 					 new_plane_state);
10749 
10750 	/* Remove any changed/removed planes */
10751 	if (!enable) {
10752 		if (!needs_reset)
10753 			return 0;
10754 
10755 		if (!old_plane_crtc)
10756 			return 0;
10757 
10758 		old_crtc_state = drm_atomic_get_old_crtc_state(
10759 				state, old_plane_crtc);
10760 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10761 
10762 		if (!dm_old_crtc_state->stream)
10763 			return 0;
10764 
10765 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10766 				plane->base.id, old_plane_crtc->base.id);
10767 
10768 		ret = dm_atomic_get_state(state, &dm_state);
10769 		if (ret)
10770 			return ret;
10771 
10772 		if (!dc_remove_plane_from_context(
10773 				dc,
10774 				dm_old_crtc_state->stream,
10775 				dm_old_plane_state->dc_state,
10776 				dm_state->context)) {
10777 
10778 			return -EINVAL;
10779 		}
10780 
10781 
10782 		dc_plane_state_release(dm_old_plane_state->dc_state);
10783 		dm_new_plane_state->dc_state = NULL;
10784 
10785 		*lock_and_validation_needed = true;
10786 
10787 	} else { /* Add new planes */
10788 		struct dc_plane_state *dc_new_plane_state;
10789 
10790 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10791 			return 0;
10792 
10793 		if (!new_plane_crtc)
10794 			return 0;
10795 
10796 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10797 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10798 
10799 		if (!dm_new_crtc_state->stream)
10800 			return 0;
10801 
10802 		if (!needs_reset)
10803 			return 0;
10804 
10805 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10806 		if (ret)
10807 			return ret;
10808 
10809 		WARN_ON(dm_new_plane_state->dc_state);
10810 
10811 		dc_new_plane_state = dc_create_plane_state(dc);
10812 		if (!dc_new_plane_state)
10813 			return -ENOMEM;
10814 
10815 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10816 				 plane->base.id, new_plane_crtc->base.id);
10817 
10818 		ret = fill_dc_plane_attributes(
10819 			drm_to_adev(new_plane_crtc->dev),
10820 			dc_new_plane_state,
10821 			new_plane_state,
10822 			new_crtc_state);
10823 		if (ret) {
10824 			dc_plane_state_release(dc_new_plane_state);
10825 			return ret;
10826 		}
10827 
10828 		ret = dm_atomic_get_state(state, &dm_state);
10829 		if (ret) {
10830 			dc_plane_state_release(dc_new_plane_state);
10831 			return ret;
10832 		}
10833 
10834 		/*
10835 		 * Any atomic check errors that occur after this will
10836 		 * not need a release. The plane state will be attached
10837 		 * to the stream, and therefore part of the atomic
10838 		 * state. It'll be released when the atomic state is
10839 		 * cleaned.
10840 		 */
10841 		if (!dc_add_plane_to_context(
10842 				dc,
10843 				dm_new_crtc_state->stream,
10844 				dc_new_plane_state,
10845 				dm_state->context)) {
10846 
10847 			dc_plane_state_release(dc_new_plane_state);
10848 			return -EINVAL;
10849 		}
10850 
10851 		dm_new_plane_state->dc_state = dc_new_plane_state;
10852 
10853 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10854 
10855 		/* Tell DC to do a full surface update every time there
10856 		 * is a plane change. Inefficient, but works for now.
10857 		 */
10858 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10859 
10860 		*lock_and_validation_needed = true;
10861 	}
10862 
10863 
10864 	return ret;
10865 }
10866 
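/*
 * Return the plane source size in whole pixels (src_w/src_h are 16.16
 * fixed point), swapping width and height for 90/270 degree rotations so
 * that scaling checks compare post-rotation dimensions.
 */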
10867 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10868 				       int *src_w, int *src_h)
10869 {
10870 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10871 	case DRM_MODE_ROTATE_90:
10872 	case DRM_MODE_ROTATE_270:
10873 		*src_w = plane_state->src_h >> 16;
10874 		*src_h = plane_state->src_w >> 16;
10875 		break;
10876 	case DRM_MODE_ROTATE_0:
10877 	case DRM_MODE_ROTATE_180:
10878 	default:
10879 		*src_w = plane_state->src_w >> 16;
10880 		*src_h = plane_state->src_h >> 16;
10881 		break;
10882 	}
10883 }
10884 
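/*
 * DCE/DCN have no dedicated hardware cursor plane; the cursor inherits
 * scaling from the pipe below it. Reject the commit when the cursor scale
 * factor (crtc size * 1000 / src size) differs from that of any enabled
 * non-cursor plane on the same CRTC, stopping at the first plane that
 * fully covers the CRTC.
 */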
10885 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10886 				struct drm_crtc *crtc,
10887 				struct drm_crtc_state *new_crtc_state)
10888 {
10889 	struct drm_plane *cursor = crtc->cursor, *underlying;
10890 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10891 	int i;
10892 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10893 	int cursor_src_w, cursor_src_h;
10894 	int underlying_src_w, underlying_src_h;
10895 
10896 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10897 	 * cursor per pipe but it's going to inherit the scaling and
10898 	 * positioning from the underlying pipe. Check the cursor plane's
10899 	 * blending properties match the underlying planes'. */
10900 
10901 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10902 	if (!new_cursor_state || !new_cursor_state->fb) {
10903 		return 0;
10904 	}
10905 
10906 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10907 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10908 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10909 
10910 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10911 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10912 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10913 			continue;
10914 
10915 		/* Ignore disabled planes */
10916 		if (!new_underlying_state->fb)
10917 			continue;
10918 
10919 		dm_get_oriented_plane_size(new_underlying_state,
10920 					   &underlying_src_w, &underlying_src_h);
10921 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10922 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10923 
10924 		if (cursor_scale_w != underlying_scale_w ||
10925 		    cursor_scale_h != underlying_scale_h) {
10926 			drm_dbg_atomic(crtc->dev,
10927 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10928 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10929 			return -EINVAL;
10930 		}
10931 
10932 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10933 		if (new_underlying_state->crtc_x <= 0 &&
10934 		    new_underlying_state->crtc_y <= 0 &&
10935 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10936 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10937 			break;
10938 	}
10939 
10940 	return 0;
10941 }
10942 
10943 #if defined(CONFIG_DRM_AMD_DC_DCN)
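/*
 * When a CRTC driven by an MST connector undergoes a modeset, DSC
 * reconfiguration may change the bandwidth available to the other streams
 * on the same MST topology, so pull every CRTC sharing that topology into
 * the atomic state.
 */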
10944 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10945 {
10946 	struct drm_connector *connector;
10947 	struct drm_connector_state *conn_state, *old_conn_state;
10948 	struct amdgpu_dm_connector *aconnector = NULL;
10949 	int i;
10950 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10951 		if (!conn_state->crtc)
10952 			conn_state = old_conn_state;
10953 
10954 		if (conn_state->crtc != crtc)
10955 			continue;
10956 
10957 		aconnector = to_amdgpu_dm_connector(connector);
10958 		if (!aconnector->port || !aconnector->mst_port)
10959 			aconnector = NULL;
10960 		else
10961 			break;
10962 	}
10963 
10964 	if (!aconnector)
10965 		return 0;
10966 
10967 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10968 }
10969 #endif
10970 
10971 /**
10972  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10973  * @dev: The DRM device
10974  * @state: The atomic state to commit
10975  *
10976  * Validate that the given atomic state is programmable by DC into hardware.
10977  * This involves constructing a &struct dc_state reflecting the new hardware
10978  * state we wish to commit, then querying DC to see if it is programmable. It's
10979  * important not to modify the existing DC state. Otherwise, atomic_check
10980  * may unexpectedly commit hardware changes.
10981  *
10982  * When validating the DC state, it's important that the right locks are
10983  * acquired. For full updates case which removes/adds/updates streams on one
10984  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10985  * that any such full update commit will wait for completion of any outstanding
10986  * flip using DRMs synchronization events.
10987  *
10988  * Note that DM adds the affected connectors for all CRTCs in state, when that
10989  * might not seem necessary. This is because DC stream creation requires the
10990  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10991  * be possible but non-trivial - a possible TODO item.
10992  *
10993  * Return: -Error code if validation failed.
10994  */
10995 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10996 				  struct drm_atomic_state *state)
10997 {
10998 	struct amdgpu_device *adev = drm_to_adev(dev);
10999 	struct dm_atomic_state *dm_state = NULL;
11000 	struct dc *dc = adev->dm.dc;
11001 	struct drm_connector *connector;
11002 	struct drm_connector_state *old_con_state, *new_con_state;
11003 	struct drm_crtc *crtc;
11004 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11005 	struct drm_plane *plane;
11006 	struct drm_plane_state *old_plane_state, *new_plane_state;
11007 	enum dc_status status;
11008 	int ret, i;
11009 	bool lock_and_validation_needed = false;
11010 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11011 #if defined(CONFIG_DRM_AMD_DC_DCN)
11012 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
11013 	struct drm_dp_mst_topology_state *mst_state;
11014 	struct drm_dp_mst_topology_mgr *mgr;
11015 #endif
11016 
11017 	trace_amdgpu_dm_atomic_check_begin(state);
11018 
11019 	ret = drm_atomic_helper_check_modeset(dev, state);
11020 	if (ret) {
11021 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11022 		goto fail;
11023 	}
11024 
11025 	/* Check connector changes */
11026 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11027 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11028 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11029 
11030 		/* Skip connectors that are disabled or part of modeset already. */
11031 		if (!old_con_state->crtc && !new_con_state->crtc)
11032 			continue;
11033 
11034 		if (!new_con_state->crtc)
11035 			continue;
11036 
11037 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11038 		if (IS_ERR(new_crtc_state)) {
11039 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11040 			ret = PTR_ERR(new_crtc_state);
11041 			goto fail;
11042 		}
11043 
11044 		if (dm_old_con_state->abm_level !=
11045 		    dm_new_con_state->abm_level)
11046 			new_crtc_state->connectors_changed = true;
11047 	}
11048 
11049 #if defined(CONFIG_DRM_AMD_DC_DCN)
11050 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11051 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11052 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11053 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11054 				if (ret) {
11055 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11056 					goto fail;
11057 				}
11058 			}
11059 		}
11060 		pre_validate_dsc(state, &dm_state, vars);
11061 	}
11062 #endif
11063 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11064 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11065 
11066 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11067 		    !new_crtc_state->color_mgmt_changed &&
11068 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11069 			dm_old_crtc_state->dsc_force_changed == false)
11070 			continue;
11071 
11072 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11073 		if (ret) {
11074 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11075 			goto fail;
11076 		}
11077 
11078 		if (!new_crtc_state->enable)
11079 			continue;
11080 
11081 		ret = drm_atomic_add_affected_connectors(state, crtc);
11082 		if (ret) {
11083 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11084 			goto fail;
11085 		}
11086 
11087 		ret = drm_atomic_add_affected_planes(state, crtc);
11088 		if (ret) {
11089 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11090 			goto fail;
11091 		}
11092 
11093 		if (dm_old_crtc_state->dsc_force_changed)
11094 			new_crtc_state->mode_changed = true;
11095 	}
11096 
11097 	/*
11098 	 * Add all primary and overlay planes on the CRTC to the state
11099 	 * whenever a plane is enabled to maintain correct z-ordering
11100 	 * and to enable fast surface updates.
11101 	 */
11102 	drm_for_each_crtc(crtc, dev) {
11103 		bool modified = false;
11104 
11105 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11106 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11107 				continue;
11108 
11109 			if (new_plane_state->crtc == crtc ||
11110 			    old_plane_state->crtc == crtc) {
11111 				modified = true;
11112 				break;
11113 			}
11114 		}
11115 
11116 		if (!modified)
11117 			continue;
11118 
11119 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11120 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11121 				continue;
11122 
11123 			new_plane_state =
11124 				drm_atomic_get_plane_state(state, plane);
11125 
11126 			if (IS_ERR(new_plane_state)) {
11127 				ret = PTR_ERR(new_plane_state);
11128 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11129 				goto fail;
11130 			}
11131 		}
11132 	}
11133 
11134 	/* Remove existing planes if they are modified */
11135 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11136 		ret = dm_update_plane_state(dc, state, plane,
11137 					    old_plane_state,
11138 					    new_plane_state,
11139 					    false,
11140 					    &lock_and_validation_needed);
11141 		if (ret) {
11142 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11143 			goto fail;
11144 		}
11145 	}
11146 
11147 	/* Disable all crtcs which require disable */
11148 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11149 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11150 					   old_crtc_state,
11151 					   new_crtc_state,
11152 					   false,
11153 					   &lock_and_validation_needed);
11154 		if (ret) {
11155 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11156 			goto fail;
11157 		}
11158 	}
11159 
11160 	/* Enable all crtcs which require enable */
11161 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11162 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11163 					   old_crtc_state,
11164 					   new_crtc_state,
11165 					   true,
11166 					   &lock_and_validation_needed);
11167 		if (ret) {
11168 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11169 			goto fail;
11170 		}
11171 	}
11172 
11173 	/* Add new/modified planes */
11174 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11175 		ret = dm_update_plane_state(dc, state, plane,
11176 					    old_plane_state,
11177 					    new_plane_state,
11178 					    true,
11179 					    &lock_and_validation_needed);
11180 		if (ret) {
11181 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11182 			goto fail;
11183 		}
11184 	}
11185 
11186 	/* Run this here since we want to validate the streams we created */
11187 	ret = drm_atomic_helper_check_planes(dev, state);
11188 	if (ret) {
11189 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11190 		goto fail;
11191 	}
11192 
11193 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11194 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11195 		if (dm_new_crtc_state->mpo_requested)
11196 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11197 	}
11198 
11199 	/* Check cursor planes scaling */
11200 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11201 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11202 		if (ret) {
11203 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11204 			goto fail;
11205 		}
11206 	}
11207 
11208 	if (state->legacy_cursor_update) {
11209 		/*
11210 		 * This is a fast cursor update coming from the plane update
11211 		 * helper, check if it can be done asynchronously for better
11212 		 * performance.
11213 		 */
11214 		state->async_update =
11215 			!drm_atomic_helper_async_check(dev, state);
11216 
11217 		/*
11218 		 * Skip the remaining global validation if this is an async
11219 		 * update. Cursor updates can be done without affecting
11220 		 * state or bandwidth calcs and this avoids the performance
11221 		 * penalty of locking the private state object and
11222 		 * allocating a new dc_state.
11223 		 */
11224 		if (state->async_update)
11225 			return 0;
11226 	}
11227 
11228 	/* Check scaling and underscan changes */
11229 	/* TODO Scaling changes validation was removed due to the inability to commit
11230 	 * a new stream into the context w/o causing a full reset. Need to
11231 	 * decide how to handle.
11232 	 */
11233 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11234 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11235 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11236 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11237 
11238 		/* Skip any modesets/resets */
11239 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11240 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11241 			continue;
11242 
11243 		/* Skip anything that is not a scaling or underscan change */
11244 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11245 			continue;
11246 
11247 		lock_and_validation_needed = true;
11248 	}
11249 
11250 #if defined(CONFIG_DRM_AMD_DC_DCN)
11251 	/* set the slot info for each mst_state based on the link encoding format */
11252 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11253 		struct amdgpu_dm_connector *aconnector;
11254 		struct drm_connector *connector;
11255 		struct drm_connector_list_iter iter;
11256 		u8 link_coding_cap;
11257 
11258 		if (!mgr->mst_state)
11259 			continue;
11260 
11261 		drm_connector_list_iter_begin(dev, &iter);
11262 		drm_for_each_connector_iter(connector, &iter) {
11263 			int id = connector->index;
11264 
11265 			if (id == mst_state->mgr->conn_base_id) {
11266 				aconnector = to_amdgpu_dm_connector(connector);
11267 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11268 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11269 
11270 				break;
11271 			}
11272 		}
11273 		drm_connector_list_iter_end(&iter);
11274 
11275 	}
11276 #endif
11277 	/*
11278 	 * Streams and planes are reset when there are changes that affect
11279 	 * bandwidth. Anything that affects bandwidth needs to go through
11280 	 * DC global validation to ensure that the configuration can be applied
11281 	 * to hardware.
11282 	 *
11283 	 * We have to currently stall out here in atomic_check for outstanding
11284 	 * commits to finish in this case because our IRQ handlers reference
11285 	 * DRM state directly - we can end up disabling interrupts too early
11286 	 * if we don't.
11287 	 *
11288 	 * TODO: Remove this stall and drop DM state private objects.
11289 	 */
11290 	if (lock_and_validation_needed) {
11291 		ret = dm_atomic_get_state(state, &dm_state);
11292 		if (ret) {
11293 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11294 			goto fail;
11295 		}
11296 
11297 		ret = do_aquire_global_lock(dev, state);
11298 		if (ret) {
11299 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11300 			goto fail;
11301 		}
11302 
11303 #if defined(CONFIG_DRM_AMD_DC_DCN)
11304 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11305 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
11306 			goto fail;
11307 		}
11308 
11309 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11310 		if (ret) {
11311 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11312 			goto fail;
11313 		}
11314 #endif
11315 
11316 		/*
11317 		 * Perform validation of MST topology in the state:
11318 		 * We need to perform MST atomic check before calling
11319 		 * dc_validate_global_state(), or there is a chance
11320 		 * to get stuck in an infinite loop and hang eventually.
11321 		 */
11322 		ret = drm_dp_mst_atomic_check(state);
11323 		if (ret) {
11324 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11325 			goto fail;
11326 		}
11327 		status = dc_validate_global_state(dc, dm_state->context, true);
11328 		if (status != DC_OK) {
11329 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11330 				       dc_status_to_str(status), status);
11331 			ret = -EINVAL;
11332 			goto fail;
11333 		}
11334 	} else {
11335 		/*
11336 		 * The commit is a fast update. Fast updates shouldn't change
11337 		 * the DC context, affect global validation, and can have their
11338 		 * commit work done in parallel with other commits not touching
11339 		 * the same resource. If we have a new DC context as part of
11340 		 * the DM atomic state from validation we need to free it and
11341 		 * retain the existing one instead.
11342 		 *
11343 		 * Furthermore, since the DM atomic state only contains the DC
11344 		 * context and can safely be annulled, we can free the state
11345 		 * and clear the associated private object now to free
11346 		 * some memory and avoid a possible use-after-free later.
11347 		 */
11348 
11349 		for (i = 0; i < state->num_private_objs; i++) {
11350 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11351 
11352 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11353 				int j = state->num_private_objs-1;
11354 
11355 				dm_atomic_destroy_state(obj,
11356 						state->private_objs[i].state);
11357 
11358 				/* If i is not at the end of the array then the
11359 				 * last element needs to be moved to where i was
11360 				 * before the array can safely be truncated.
11361 				 */
11362 				if (i != j)
11363 					state->private_objs[i] =
11364 						state->private_objs[j];
11365 
11366 				state->private_objs[j].ptr = NULL;
11367 				state->private_objs[j].state = NULL;
11368 				state->private_objs[j].old_state = NULL;
11369 				state->private_objs[j].new_state = NULL;
11370 
11371 				state->num_private_objs = j;
11372 				break;
11373 			}
11374 		}
11375 	}
11376 
11377 	/* Store the overall update type for use later in atomic check. */
11378 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11379 		struct dm_crtc_state *dm_new_crtc_state =
11380 			to_dm_crtc_state(new_crtc_state);
11381 
11382 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11383 							 UPDATE_TYPE_FULL :
11384 							 UPDATE_TYPE_FAST;
11385 	}
11386 
11387 	/* Must be success */
11388 	WARN_ON(ret);
11389 
11390 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11391 
11392 	return ret;
11393 
11394 fail:
11395 	if (ret == -EDEADLK)
11396 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11397 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11398 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11399 	else
11400 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11401 
11402 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11403 
11404 	return ret;
11405 }
11406 
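/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and check the
 * MSA_TIMING_PAR_IGNORED bit, i.e. whether the sink can render without
 * the MSA timing parameters; used below to gate freesync support on
 * DP/eDP sinks.
 */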
11407 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11408 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11409 {
11410 	uint8_t dpcd_data;
11411 	bool capable = false;
11412 
11413 	if (amdgpu_dm_connector->dc_link &&
11414 		dm_helpers_dp_read_dpcd(
11415 				NULL,
11416 				amdgpu_dm_connector->dc_link,
11417 				DP_DOWN_STREAM_PORT_COUNT,
11418 				&dpcd_data,
11419 				sizeof(dpcd_data))) {
11420 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11421 	}
11422 
11423 	return capable;
11424 }
11425 
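/*
 * Hand one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to DMUB for parsing. Intermediate chunks are answered
 * with an ACK; once the whole block has been received, DMUB reports
 * whether an AMD VSDB was found and, if so, its freesync refresh range.
 */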
11426 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11427 		unsigned int offset,
11428 		unsigned int total_length,
11429 		uint8_t *data,
11430 		unsigned int length,
11431 		struct amdgpu_hdmi_vsdb_info *vsdb)
11432 {
11433 	bool res;
11434 	union dmub_rb_cmd cmd;
11435 	struct dmub_cmd_send_edid_cea *input;
11436 	struct dmub_cmd_edid_cea_output *output;
11437 
11438 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11439 		return false;
11440 
11441 	memset(&cmd, 0, sizeof(cmd));
11442 
11443 	input = &cmd.edid_cea.data.input;
11444 
11445 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11446 	cmd.edid_cea.header.sub_type = 0;
11447 	cmd.edid_cea.header.payload_bytes =
11448 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11449 	input->offset = offset;
11450 	input->length = length;
11451 	input->cea_total_length = total_length;
11452 	memcpy(input->payload, data, length);
11453 
11454 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11455 	if (!res) {
11456 		DRM_ERROR("EDID CEA parser failed\n");
11457 		return false;
11458 	}
11459 
11460 	output = &cmd.edid_cea.data.output;
11461 
11462 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11463 		if (!output->ack.success) {
11464 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11465 					output->ack.offset);
11466 		}
11467 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11468 		if (!output->amd_vsdb.vsdb_found)
11469 			return false;
11470 
11471 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11472 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11473 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11474 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11475 	} else {
11476 		DRM_WARN("Unknown EDID CEA parser results\n");
11477 		return false;
11478 	}
11479 
11480 	return true;
11481 }
11482 
11483 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11484 		uint8_t *edid_ext, int len,
11485 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11486 {
11487 	int i;
11488 
11489 	/* send extension block to DMCU for parsing */
11490 	for (i = 0; i < len; i += 8) {
11491 		bool res;
11492 		int offset;
11493 
11494 		/* send 8 bytes at a time */
11495 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11496 			return false;
11497 
11498 		if (i+8 == len) {
11499 			/* EDID block has been sent completely, expect result */
11500 			int version, min_rate, max_rate;
11501 
11502 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11503 			if (res) {
11504 				/* amd vsdb found */
11505 				vsdb_info->freesync_supported = 1;
11506 				vsdb_info->amd_vsdb_version = version;
11507 				vsdb_info->min_refresh_rate_hz = min_rate;
11508 				vsdb_info->max_refresh_rate_hz = max_rate;
11509 				return true;
11510 			}
11511 			/* not amd vsdb */
11512 			return false;
11513 		}
11514 
11515 		/* check for ack */
11516 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11517 		if (!res)
11518 			return false;
11519 	}
11520 
11521 	return false;
11522 }
11523 
11524 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11525 		uint8_t *edid_ext, int len,
11526 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11527 {
11528 	int i;
11529 
11530 	/* send extension block to DMUB for parsing */
11531 	for (i = 0; i < len; i += 8) {
11532 		/* send 8 bytes at a time */
11533 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11534 			return false;
11535 	}
11536 
11537 	return vsdb_info->freesync_supported;
11538 }
11539 
11540 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11541 		uint8_t *edid_ext, int len,
11542 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11543 {
11544 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11545 
11546 	if (adev->dm.dmub_srv)
11547 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11548 	else
11549 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11550 }
11551 
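/*
 * Locate the CEA extension block in @edid (mirroring drm_find_cea_extension())
 * and parse it for an AMD VSDB. Returns the index of the CEA extension on
 * success, or -ENODEV when there is no EDID, no CEA extension or no valid
 * AMD VSDB.
 */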
11552 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11553 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11554 {
11555 	uint8_t *edid_ext = NULL;
11556 	int i;
11557 	bool valid_vsdb_found = false;
11558 
11559 	/*----- drm_find_cea_extension() -----*/
11560 	/* No EDID or EDID extensions */
11561 	if (edid == NULL || edid->extensions == 0)
11562 		return -ENODEV;
11563 
11564 	/* Find CEA extension */
11565 	for (i = 0; i < edid->extensions; i++) {
11566 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11567 		if (edid_ext[0] == CEA_EXT)
11568 			break;
11569 	}
11570 
11571 	if (i == edid->extensions)
11572 		return -ENODEV;
11573 
11574 	/*----- cea_db_offsets() -----*/
11575 	if (edid_ext[0] != CEA_EXT)
11576 		return -ENODEV;
11577 
11578 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11579 
11580 	return valid_vsdb_found ? i : -ENODEV;
11581 }
11582 
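/*
 * Update the connector's FreeSync/VRR capability from the EDID: DP/eDP sinks
 * are checked against the monitor range descriptor in the detailed timings,
 * while HDMI sinks are checked for an AMD VSDB in the CEA extension block.
 * The result is cached in the connector state and exposed through the
 * vrr_capable connector property.
 */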
11583 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11584 					struct edid *edid)
11585 {
11586 	int i = 0;
11587 	struct detailed_timing *timing;
11588 	struct detailed_non_pixel *data;
11589 	struct detailed_data_monitor_range *range;
11590 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11591 			to_amdgpu_dm_connector(connector);
11592 	struct dm_connector_state *dm_con_state = NULL;
11593 	struct dc_sink *sink;
11594 
11595 	struct drm_device *dev = connector->dev;
11596 	struct amdgpu_device *adev = drm_to_adev(dev);
11597 	bool freesync_capable = false;
11598 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11599 
11600 	if (!connector->state) {
11601 		DRM_ERROR("%s - Connector has no state\n", __func__);
11602 		goto update;
11603 	}
11604 
11605 	sink = amdgpu_dm_connector->dc_sink ?
11606 		amdgpu_dm_connector->dc_sink :
11607 		amdgpu_dm_connector->dc_em_sink;
11608 
11609 	if (!edid || !sink) {
11610 		dm_con_state = to_dm_connector_state(connector->state);
11611 
11612 		amdgpu_dm_connector->min_vfreq = 0;
11613 		amdgpu_dm_connector->max_vfreq = 0;
11614 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11615 		connector->display_info.monitor_range.min_vfreq = 0;
11616 		connector->display_info.monitor_range.max_vfreq = 0;
11617 		freesync_capable = false;
11618 
11619 		goto update;
11620 	}
11621 
11622 	dm_con_state = to_dm_connector_state(connector->state);
11623 
11624 	if (!adev->dm.freesync_module)
11625 		goto update;
11626 
11627 
11628 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11629 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11630 		bool edid_check_required = false;
11631 
11632 		if (edid) {
11633 			edid_check_required = is_dp_capable_without_timing_msa(
11634 						adev->dm.dc,
11635 						amdgpu_dm_connector);
11636 		}
11637 
11638 		if (edid_check_required && (edid->version > 1 ||
11639 		   (edid->version == 1 && edid->revision > 1))) {
11640 			for (i = 0; i < 4; i++) {
11641 
11642 				timing	= &edid->detailed_timings[i];
11643 				data	= &timing->data.other_data;
11644 				range	= &data->data.range;
11645 				/*
11646 				 * Check for a monitor range (continuous frequency) descriptor
11647 				 */
11648 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11649 					continue;
11650 				/*
11651 				 * Check for the range limits only flag. If flags == 1,
11652 				 * no additional timing information is provided.
11653 				 * Default GTF, GTF secondary curve and CVT are not
11654 				 * supported.
11655 				 */
11656 				if (range->flags != 1)
11657 					continue;
11658 
11659 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11660 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11661 				amdgpu_dm_connector->pixel_clock_mhz =
11662 					range->pixel_clock_mhz * 10;
11663 
11664 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11665 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11666 
11667 				break;
11668 			}
11669 
11670 			if (amdgpu_dm_connector->max_vfreq -
11671 			    amdgpu_dm_connector->min_vfreq > 10) {
11672 
11673 				freesync_capable = true;
11674 			}
11675 		}
11676 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11677 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11678 		if (i >= 0 && vsdb_info.freesync_supported) {
11679 			timing  = &edid->detailed_timings[i];
11680 			data    = &timing->data.other_data;
11681 
11682 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11683 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11684 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11685 				freesync_capable = true;
11686 
11687 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11688 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11689 		}
11690 	}
11691 
11692 update:
11693 	if (dm_con_state)
11694 		dm_con_state->freesync_capable = freesync_capable;
11695 
11696 	if (connector->vrr_capable_property)
11697 		drm_connector_set_vrr_capable_property(connector,
11698 						       freesync_capable);
11699 }
11700 
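/*
 * Propagate the adev->dm.force_timing_sync setting to every stream in the
 * current DC state and retrigger CRTC timing synchronization.
 */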
11701 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11702 {
11703 	struct amdgpu_device *adev = drm_to_adev(dev);
11704 	struct dc *dc = adev->dm.dc;
11705 	int i;
11706 
11707 	mutex_lock(&adev->dm.dc_lock);
11708 	if (dc->current_state) {
11709 		for (i = 0; i < dc->current_state->stream_count; ++i)
11710 			dc->current_state->streams[i]
11711 				->triggered_crtc_reset.enabled =
11712 				adev->dm.force_timing_sync;
11713 
11714 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11715 		dc_trigger_sync(dc, dc->current_state);
11716 	}
11717 	mutex_unlock(&adev->dm.dc_lock);
11718 }
11719 
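/*
 * Register write helper used for DC register access: write the register
 * through CGS and emit an amdgpu_dc_wreg trace event.
 */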
11720 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11721 		       uint32_t value, const char *func_name)
11722 {
11723 #ifdef DM_CHECK_ADDR_0
11724 	if (address == 0) {
11725 		DC_ERR("invalid register write. address = 0\n");
11726 		return;
11727 	}
11728 #endif
11729 	cgs_write_register(ctx->cgs_device, address, value);
11730 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11731 }
11732 
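/*
 * Register read helper used for DC register access. Reads are rejected
 * (assert and return 0) while a non-burst DMUB register offload gather is in
 * progress; every completed read emits an amdgpu_dc_rreg trace event.
 */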
11733 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11734 			  const char *func_name)
11735 {
11736 	uint32_t value;
11737 #ifdef DM_CHECK_ADDR_0
11738 	if (address == 0) {
11739 		DC_ERR("invalid register read; address = 0\n");
11740 		return 0;
11741 	}
11742 #endif
11743 
11744 	if (ctx->dmub_srv &&
11745 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11746 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11747 		ASSERT(false);
11748 		return 0;
11749 	}
11750 
11751 	value = cgs_read_register(ctx->cgs_device, address);
11752 
11753 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11754 
11755 	return value;
11756 }
11757 
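/*
 * Translate the outcome of a DMUB async-to-sync transaction into the value
 * returned by amdgpu_dm_process_dmub_aux_transfer_sync(): the AUX reply
 * length (AUX) or 0 (SET_CONFIG) on success, -1 on failure, with
 * *operation_result carrying the detailed AUX or SET_CONFIG status.
 */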
11758 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11759 						struct dc_context *ctx,
11760 						uint8_t status_type,
11761 						uint32_t *operation_result)
11762 {
11763 	struct amdgpu_device *adev = ctx->driver_context;
11764 	int return_status = -1;
11765 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11766 
11767 	if (is_cmd_aux) {
11768 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11769 			return_status = p_notify->aux_reply.length;
11770 			*operation_result = p_notify->result;
11771 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11772 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11773 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11774 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11775 		} else {
11776 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11777 		}
11778 	} else {
11779 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11780 			return_status = 0;
11781 			*operation_result = p_notify->sc_status;
11782 		} else {
11783 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11784 		}
11785 	}
11786 
11787 	return return_status;
11788 }
11789 
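/*
 * Issue a DMUB AUX or SET_CONFIG request and wait up to 10 seconds on the
 * dmub_aux_transfer_done completion for the reply. For successful AUX reads
 * the reply data is copied back into the caller's payload; the final status
 * is translated via amdgpu_dm_set_dmub_async_sync_status().
 */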
11790 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11791 	unsigned int link_index, void *cmd_payload, void *operation_result)
11792 {
11793 	struct amdgpu_device *adev = ctx->driver_context;
11794 	int ret = 0;
11795 
11796 	if (is_cmd_aux) {
11797 		dc_process_dmub_aux_transfer_async(ctx->dc,
11798 			link_index, (struct aux_payload *)cmd_payload);
11799 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11800 					(struct set_config_cmd_payload *)cmd_payload,
11801 					adev->dm.dmub_notify)) {
11802 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11803 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11804 					(uint32_t *)operation_result);
11805 	}
11806 
11807 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11808 	if (ret == 0) {
11809 		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
11810 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11811 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11812 				(uint32_t *)operation_result);
11813 	}
11814 
11815 	if (is_cmd_aux) {
11816 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11817 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11818 
11819 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11820 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11821 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11822 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11823 				       adev->dm.dmub_notify->aux_reply.length);
11824 			}
11825 		}
11826 	}
11827 
11828 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11829 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11830 			(uint32_t *)operation_result);
11831 }
11832 
11833 /*
11834  * Check whether seamless boot is supported.
11835  *
11836  * So far we only support seamless boot on CHIP_VANGOGH.
11837  * If everything goes well, we may consider expanding
11838  * seamless boot to other ASICs.
11839  */
11840 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11841 {
11842 	switch (adev->asic_type) {
11843 	case CHIP_VANGOGH:
11844 		if (!adev->mman.keep_stolen_vga_memory)
11845 			return true;
11846 		break;
11847 	default:
11848 		break;
11849 	}
11850 
11851 	return false;
11852 }
11853