xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision ffd89aa968d9046ab5fb9f7cdb7f8d3c383a15c1)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 
55 #include "amd_shared.h"
56 #include "amdgpu_dm_irq.h"
57 #include "dm_helpers.h"
58 #include "amdgpu_dm_mst_types.h"
59 #if defined(CONFIG_DEBUG_FS)
60 #include "amdgpu_dm_debugfs.h"
61 #endif
62 #include "amdgpu_dm_psr.h"
63 
64 #include "ivsrcid/ivsrcid_vislands30.h"
65 
66 #include "i2caux_interface.h"
67 #include <linux/module.h>
68 #include <linux/moduleparam.h>
69 #include <linux/types.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/pci.h>
72 #include <linux/firmware.h>
73 #include <linux/component.h>
74 
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84 
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87 
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92 
93 #include "soc15_common.h"
94 #endif
95 
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99 
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 
117 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125 
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
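/*
 * DMUB firmware image layout as consumed by dm_dmub_hw_init() below
 * (offsets and sizes come from struct dmcub_firmware_header_v1_0):
 *
 *   ucode_array_offset_bytes                     -> [ PSP header (0x100) | inst_const payload | PSP footer (0x100) ]
 *   ucode_array_offset_bytes + inst_const_bytes  -> [ bss/data (bss_data_bytes) ]
 *
 * inst_const_bytes therefore includes the PSP header and footer, which is
 * why both are subtracted when the payload is copied into the framebuffer
 * window.
 */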
128 
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138 
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143 
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146 	switch (link->dpcd_caps.dongle_type) {
147 	case DISPLAY_DONGLE_NONE:
148 		return DRM_MODE_SUBCONNECTOR_Native;
149 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 		return DRM_MODE_SUBCONNECTOR_VGA;
151 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 		return DRM_MODE_SUBCONNECTOR_DVID;
154 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 		return DRM_MODE_SUBCONNECTOR_HDMIA;
157 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 	default:
159 		return DRM_MODE_SUBCONNECTOR_Unknown;
160 	}
161 }
162 
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165 	struct dc_link *link = aconnector->dc_link;
166 	struct drm_connector *connector = &aconnector->base;
167 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168 
169 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 		return;
171 
172 	if (aconnector->dc_sink)
173 		subconnector = get_subconnector_type(link);
174 
175 	drm_object_property_set_value(&connector->base,
176 			connector->dev->mode_config.dp_subconnector_property,
177 			subconnector);
178 }
179 
180 /*
181  * initializes drm_device display related structures, based on the information
182  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
183  * drm_encoder, drm_mode_config
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190 
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 				struct drm_plane *plane,
193 				unsigned long possible_crtcs,
194 				const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 			       struct drm_plane *plane,
197 			       uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
200 				    uint32_t link_index,
201 				    struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 				  struct amdgpu_encoder *aencoder,
204 				  uint32_t link_index);
205 
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207 
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209 
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 				  struct drm_atomic_state *state);
212 
213 static void handle_cursor_update(struct drm_plane *plane,
214 				 struct drm_plane_state *old_plane_state);
215 
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218 
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220 
221 static bool
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223 				 struct drm_crtc_state *new_crtc_state);
224 /*
225  * dm_vblank_get_counter
226  *
227  * @brief
228  * Get counter for number of vertical blanks
229  *
230  * @param
231  * struct amdgpu_device *adev - [in] desired amdgpu device
232  * int crtc - [in] which CRTC to get the counter from
233  *
234  * @return
235  * Counter for vertical blanks
236  */
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 {
239 	if (crtc >= adev->mode_info.num_crtc)
240 		return 0;
241 	else {
242 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243 
244 		if (acrtc->dm_irq_params.stream == NULL) {
245 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
246 				  crtc);
247 			return 0;
248 		}
249 
250 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251 	}
252 }
253 
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255 				  u32 *vbl, u32 *position)
256 {
257 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
258 
259 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
260 		return -EINVAL;
261 	else {
262 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263 
264 		if (acrtc->dm_irq_params.stream ==  NULL) {
265 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266 				  crtc);
267 			return 0;
268 		}
269 
270 		/*
271 		 * TODO rework base driver to use values directly.
272 		 * for now parse it back into reg-format
273 		 */
274 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275 					 &v_blank_start,
276 					 &v_blank_end,
277 					 &h_position,
278 					 &v_position);
279 
280 		*position = v_position | (h_position << 16);
281 		*vbl = v_blank_start | (v_blank_end << 16);
282 	}
283 
284 	return 0;
285 }
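/*
 * Illustrative sketch (not part of the driver): how a caller could unpack
 * the reg-format values produced by dm_crtc_get_scanoutpos(). The field
 * layout mirrors the packing above; the helper name is hypothetical.
 */
static inline void example_unpack_scanoutpos(u32 position, u32 vbl,
					     u32 *hpos, u32 *vpos,
					     u32 *vbl_start, u32 *vbl_end)
{
	*vpos = position & 0xffff;	/* low 16 bits: vertical position */
	*hpos = position >> 16;		/* high 16 bits: horizontal position */
	*vbl_start = vbl & 0xffff;	/* low 16 bits: start of vblank */
	*vbl_end = vbl >> 16;		/* high 16 bits: end of vblank */
}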
286 
287 static bool dm_is_idle(void *handle)
288 {
289 	/* XXX todo */
290 	return true;
291 }
292 
293 static int dm_wait_for_idle(void *handle)
294 {
295 	/* XXX todo */
296 	return 0;
297 }
298 
299 static bool dm_check_soft_reset(void *handle)
300 {
301 	return false;
302 }
303 
304 static int dm_soft_reset(void *handle)
305 {
306 	/* XXX todo */
307 	return 0;
308 }
309 
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
312 		     int otg_inst)
313 {
314 	struct drm_device *dev = adev_to_drm(adev);
315 	struct drm_crtc *crtc;
316 	struct amdgpu_crtc *amdgpu_crtc;
317 
318 	if (WARN_ON(otg_inst == -1))
319 		return adev->mode_info.crtcs[0];
320 
321 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322 		amdgpu_crtc = to_amdgpu_crtc(crtc);
323 
324 		if (amdgpu_crtc->otg_inst == otg_inst)
325 			return amdgpu_crtc;
326 	}
327 
328 	return NULL;
329 }
330 
331 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
332 {
333 	return acrtc->dm_irq_params.freesync_config.state ==
334 		       VRR_STATE_ACTIVE_VARIABLE ||
335 	       acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_FIXED;
337 }
338 
339 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
340 {
341 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
342 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
343 }
344 
345 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
346 					      struct dm_crtc_state *new_state)
347 {
348 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
349 		return true;
350 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
351 		return true;
352 	else
353 		return false;
354 }
355 
356 /**
357  * dm_pflip_high_irq() - Handle pageflip interrupt
358  * @interrupt_params: interrupt parameters
359  *
360  * Handles the pageflip interrupt by notifying all interested parties
361  * that the pageflip has been completed.
362  */
363 static void dm_pflip_high_irq(void *interrupt_params)
364 {
365 	struct amdgpu_crtc *amdgpu_crtc;
366 	struct common_irq_params *irq_params = interrupt_params;
367 	struct amdgpu_device *adev = irq_params->adev;
368 	unsigned long flags;
369 	struct drm_pending_vblank_event *e;
370 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
371 	bool vrr_active;
372 
373 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
374 
375 	/* IRQ could occur when in initial stage */
376 	/* TODO work and BO cleanup */
377 	if (amdgpu_crtc == NULL) {
378 		DC_LOG_PFLIP("CRTC is null, returning.\n");
379 		return;
380 	}
381 
382 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
383 
384 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
385 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
386 						 amdgpu_crtc->pflip_status,
387 						 AMDGPU_FLIP_SUBMITTED,
388 						 amdgpu_crtc->crtc_id,
389 						 amdgpu_crtc);
390 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391 		return;
392 	}
393 
394 	/* page flip completed. */
395 	e = amdgpu_crtc->event;
396 	amdgpu_crtc->event = NULL;
397 
398 	WARN_ON(!e);
399 
400 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
401 
402 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
403 	if (!vrr_active ||
404 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405 				      &v_blank_end, &hpos, &vpos) ||
406 	    (vpos < v_blank_start)) {
407 		/* Update to correct count and vblank timestamp if racing with
408 		 * vblank irq. This also updates to the correct vblank timestamp
409 		 * even in VRR mode, as scanout is past the front-porch atm.
410 		 */
411 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
412 
413 		/* Wake up userspace by sending the pageflip event with proper
414 		 * count and timestamp of vblank of flip completion.
415 		 */
416 		if (e) {
417 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
418 
419 			/* Event sent, so done with vblank for this flip */
420 			drm_crtc_vblank_put(&amdgpu_crtc->base);
421 		}
422 	} else if (e) {
423 		/* VRR active and inside front-porch: vblank count and
424 		 * timestamp for pageflip event will only be up to date after
425 		 * drm_crtc_handle_vblank() has been executed from late vblank
426 		 * irq handler after start of back-porch (vline 0). We queue the
427 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
428 		 * updated timestamp and count, once it runs after us.
429 		 *
430 		 * We need to open-code this instead of using the helper
431 		 * drm_crtc_arm_vblank_event(), as that helper would
432 		 * call drm_crtc_accurate_vblank_count(), which we must
433 		 * not call in VRR mode while we are in front-porch!
434 		 */
435 
436 		/* sequence will be replaced by real count during send-out. */
437 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438 		e->pipe = amdgpu_crtc->crtc_id;
439 
440 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
441 		e = NULL;
442 	}
443 
444 	/* Keep track of vblank of this flip for flip throttling. We use the
445 	 * cooked hw counter, as that one incremented at start of this vblank
446 	 * of pageflip completion, so last_flip_vblank is the forbidden count
447 	 * for queueing new pageflips if vsync + VRR is enabled.
448 	 */
449 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
450 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
451 
452 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
454 
455 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
457 		     vrr_active, (int) !e);
458 }
459 
460 static void dm_vupdate_high_irq(void *interrupt_params)
461 {
462 	struct common_irq_params *irq_params = interrupt_params;
463 	struct amdgpu_device *adev = irq_params->adev;
464 	struct amdgpu_crtc *acrtc;
465 	struct drm_device *drm_dev;
466 	struct drm_vblank_crtc *vblank;
467 	ktime_t frame_duration_ns, previous_timestamp;
468 	unsigned long flags;
469 	int vrr_active;
470 
471 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472 
473 	if (acrtc) {
474 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475 		drm_dev = acrtc->base.dev;
476 		vblank = &drm_dev->vblank[acrtc->base.index];
477 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478 		frame_duration_ns = vblank->time - previous_timestamp;
479 
480 		if (frame_duration_ns > 0) {
481 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
482 						frame_duration_ns,
483 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
485 		}
486 
487 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
488 			      acrtc->crtc_id,
489 			      vrr_active);
490 
491 		/* Core vblank handling is done here after end of front-porch in
492 		 * vrr mode, as vblank timestamping will give valid results
493 		 * while now done after front-porch. This will also deliver
494 		 * page-flip completion events that have been queued to us
495 		 * if a pageflip happened inside front-porch.
496 		 */
497 		if (vrr_active) {
498 			drm_crtc_handle_vblank(&acrtc->base);
499 
500 			/* BTR processing for pre-DCE12 ASICs */
501 			if (acrtc->dm_irq_params.stream &&
502 			    adev->family < AMDGPU_FAMILY_AI) {
503 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504 				mod_freesync_handle_v_update(
505 				    adev->dm.freesync_module,
506 				    acrtc->dm_irq_params.stream,
507 				    &acrtc->dm_irq_params.vrr_params);
508 
509 				dc_stream_adjust_vmin_vmax(
510 				    adev->dm.dc,
511 				    acrtc->dm_irq_params.stream,
512 				    &acrtc->dm_irq_params.vrr_params.adjust);
513 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
514 			}
515 		}
516 	}
517 }
518 
519 /**
520  * dm_crtc_high_irq() - Handles CRTC interrupt
521  * @interrupt_params: used for determining the CRTC instance
522  *
523  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
524  * event handler.
525  */
526 static void dm_crtc_high_irq(void *interrupt_params)
527 {
528 	struct common_irq_params *irq_params = interrupt_params;
529 	struct amdgpu_device *adev = irq_params->adev;
530 	struct amdgpu_crtc *acrtc;
531 	unsigned long flags;
532 	int vrr_active;
533 
534 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
535 	if (!acrtc)
536 		return;
537 
538 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
539 
540 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541 		      vrr_active, acrtc->dm_irq_params.active_planes);
542 
543 	/**
544 	 * Core vblank handling at start of front-porch is only possible
545 	 * in non-vrr mode, as only there vblank timestamping will give
546 	 * valid results while done in front-porch. Otherwise defer it
547 	 * to dm_vupdate_high_irq after end of front-porch.
548 	 */
549 	if (!vrr_active)
550 		drm_crtc_handle_vblank(&acrtc->base);
551 
552 	/**
553 	 * Following stuff must happen at start of vblank, for crc
554 	 * computation and below-the-range btr support in vrr mode.
555 	 */
556 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
557 
558 	/* BTR updates need to happen before VUPDATE on Vega and above. */
559 	if (adev->family < AMDGPU_FAMILY_AI)
560 		return;
561 
562 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
563 
564 	if (acrtc->dm_irq_params.stream &&
565 	    acrtc->dm_irq_params.vrr_params.supported &&
566 	    acrtc->dm_irq_params.freesync_config.state ==
567 		    VRR_STATE_ACTIVE_VARIABLE) {
568 		mod_freesync_handle_v_update(adev->dm.freesync_module,
569 					     acrtc->dm_irq_params.stream,
570 					     &acrtc->dm_irq_params.vrr_params);
571 
572 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573 					   &acrtc->dm_irq_params.vrr_params.adjust);
574 	}
575 
576 	/*
577 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
578 	 * In that case, pageflip completion interrupts won't fire and pageflip
579 	 * completion events won't get delivered. Prevent this by sending
580 	 * pending pageflip events from here if a flip is still pending.
581 	 *
582 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
583 	 * avoid race conditions between flip programming and completion,
584 	 * which could cause too early flip completion events.
585 	 */
586 	if (adev->family >= AMDGPU_FAMILY_RV &&
587 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588 	    acrtc->dm_irq_params.active_planes == 0) {
589 		if (acrtc->event) {
590 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
591 			acrtc->event = NULL;
592 			drm_crtc_vblank_put(&acrtc->base);
593 		}
594 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
595 	}
596 
597 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 }
599 
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
602 /**
603  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
604  * DCN generation ASICs
605  * @interrupt_params: interrupt parameters
606  *
607  * Used to set crc window/read out crc value at vertical line 0 position
608  */
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
610 {
611 	struct common_irq_params *irq_params = interrupt_params;
612 	struct amdgpu_device *adev = irq_params->adev;
613 	struct amdgpu_crtc *acrtc;
614 
615 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
616 
617 	if (!acrtc)
618 		return;
619 
620 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
621 }
622 #endif
623 
624 /**
625  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
626  * @adev: amdgpu_device pointer
627  * @notify: dmub notification structure
628  *
629  * Dmub AUX or SET_CONFIG command completion processing callback.
630  * Copies dmub notification to DM, which is to be read by the AUX command
631  * issuing thread, and also signals the event to wake up that thread.
632  */
633 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
634 {
635 	if (adev->dm.dmub_notify)
636 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
637 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
638 		complete(&adev->dm.dmub_aux_transfer_done);
639 }
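/*
 * A minimal sketch of the waiting side (hypothetical helper, not the
 * driver's actual AUX path): the thread that issued the AUX command blocks
 * on dmub_aux_transfer_done until the callback above completes it, then
 * reads the copied reply out of adev->dm.dmub_notify.
 */
static inline int example_wait_for_aux_reply(struct amdgpu_device *adev,
					     unsigned long timeout_ms)
{
	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	/* adev->dm.dmub_notify now holds the dmub_notification copied above */
	return 0;
}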
640 
641 /**
642  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
643  * @adev: amdgpu_device pointer
644  * @notify: dmub notification structure
645  *
646  * Dmub HPD interrupt processing callback. Gets the display index through the
647  * link index and calls a helper to do the processing.
648  */
649 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
650 {
651 	struct amdgpu_dm_connector *aconnector;
652 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
653 	struct drm_connector *connector;
654 	struct drm_connector_list_iter iter;
655 	struct dc_link *link;
656 	uint8_t link_index = 0;
657 	struct drm_device *dev = adev->dm.ddev;
658 
659 	if (adev == NULL)
660 		return;
661 
662 	if (notify == NULL) {
663 		DRM_ERROR("DMUB HPD callback notification was NULL");
664 		return;
665 	}
666 
667 	if (notify->link_index >= adev->dm.dc->link_count) {
668 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
669 		return;
670 	}
671 
672 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
673 
674 	link_index = notify->link_index;
675 
676 	link = adev->dm.dc->links[link_index];
677 
678 	drm_connector_list_iter_begin(dev, &iter);
679 	drm_for_each_connector_iter(connector, &iter) {
680 		aconnector = to_amdgpu_dm_connector(connector);
681 		if (link && aconnector->dc_link == link) {
682 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
683 			hpd_aconnector = aconnector;
684 			break;
685 		}
686 	}
687 	drm_connector_list_iter_end(&iter);
688 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
689 
690 	if (hpd_aconnector)
691 		handle_hpd_irq_helper(hpd_aconnector);
692 }
693 
694 /**
695  * register_dmub_notify_callback - Sets callback for DMUB notify
696  * @adev: amdgpu_device pointer
697  * @type: Type of dmub notification
698  * @callback: Dmub interrupt callback function
699  * @dmub_int_thread_offload: offload indicator
700  *
701  * API to register a dmub callback handler for a dmub notification.
702  * Also sets an indicator of whether the callback processing is to be
703  * offloaded to the dmub interrupt handling thread.
704  * Return: true if successfully registered, false if there is an existing registration
705  */
706 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
707 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
708 {
709 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
710 		adev->dm.dmub_callback[type] = callback;
711 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
712 	} else
713 		return false;
714 
715 	return true;
716 }
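/*
 * Typical usage (a sketch mirroring the registrations done in
 * amdgpu_dm_init() further below): AUX replies are handled inline in the
 * interrupt context, while HPD notifications are offloaded to the dmub
 * hpd worker thread.
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *				      dmub_aux_setconfig_callback, false);
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 */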
717 
718 static void dm_handle_hpd_work(struct work_struct *work)
719 {
720 	struct dmub_hpd_work *dmub_hpd_wrk;
721 
722 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
723 
724 	if (!dmub_hpd_wrk->dmub_notify) {
725 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
726 		return;
727 	}
728 
729 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
730 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
731 		dmub_hpd_wrk->dmub_notify);
732 	}
733 	kfree(dmub_hpd_wrk);
734 
735 }
736 
737 #define DMUB_TRACE_MAX_READ 64
738 /**
739  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
740  * @interrupt_params: used for determining the Outbox instance
741  *
742  * Handles the Outbox interrupt
743  * event from the DMUB.
744  */
745 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
746 {
747 	struct dmub_notification notify;
748 	struct common_irq_params *irq_params = interrupt_params;
749 	struct amdgpu_device *adev = irq_params->adev;
750 	struct amdgpu_display_manager *dm = &adev->dm;
751 	struct dmcub_trace_buf_entry entry = { 0 };
752 	uint32_t count = 0;
753 	struct dmub_hpd_work *dmub_hpd_wrk;
754 	struct dc_link *plink = NULL;
755 
756 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
757 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
758 		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
759 		if (!dmub_hpd_wrk) {
760 			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
761 			return;
762 		}
763 		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
764 
765 		do {
766 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
767 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
768 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
769 				continue;
770 			}
771 			if (dm->dmub_thread_offload[notify.type] == true) {
772 				dmub_hpd_wrk->dmub_notify = &notify;
773 				dmub_hpd_wrk->adev = adev;
774 				if (notify.type == DMUB_NOTIFICATION_HPD) {
775 					plink = adev->dm.dc->links[notify.link_index];
776 					if (plink) {
777 						plink->hpd_status =
778 							notify.hpd_status ==
779 							DP_HPD_PLUG ? true : false;
780 					}
781 				}
782 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
783 			} else {
784 				dm->dmub_callback[notify.type](adev, &notify);
785 			}
786 		} while (notify.pending_notification);
787 	}
788 
789 
790 	do {
791 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
792 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
793 							entry.param0, entry.param1);
794 
795 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
796 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
797 		} else
798 			break;
799 
800 		count++;
801 
802 	} while (count <= DMUB_TRACE_MAX_READ);
803 
804 	if (count > DMUB_TRACE_MAX_READ)
805 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
806 }
807 #endif
808 
809 static int dm_set_clockgating_state(void *handle,
810 		  enum amd_clockgating_state state)
811 {
812 	return 0;
813 }
814 
815 static int dm_set_powergating_state(void *handle,
816 		  enum amd_powergating_state state)
817 {
818 	return 0;
819 }
820 
821 /* Prototypes of private functions */
822 static int dm_early_init(void *handle);
823 
824 /* Allocate memory for FBC compressed data  */
825 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
826 {
827 	struct drm_device *dev = connector->dev;
828 	struct amdgpu_device *adev = drm_to_adev(dev);
829 	struct dm_compressor_info *compressor = &adev->dm.compressor;
830 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
831 	struct drm_display_mode *mode;
832 	unsigned long max_size = 0;
833 
834 	if (adev->dm.dc->fbc_compressor == NULL)
835 		return;
836 
837 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
838 		return;
839 
840 	if (compressor->bo_ptr)
841 		return;
842 
843 
844 	list_for_each_entry(mode, &connector->modes, head) {
845 		if (max_size < mode->htotal * mode->vtotal)
846 			max_size = mode->htotal * mode->vtotal;
847 	}
848 
849 	if (max_size) {
850 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
851 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
852 			    &compressor->gpu_addr, &compressor->cpu_addr);
853 
854 		if (r)
855 			DRM_ERROR("DM: Failed to initialize FBC\n");
856 		else {
857 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
858 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
859 		}
860 
861 	}
862 
863 }
864 
865 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
866 					  int pipe, bool *enabled,
867 					  unsigned char *buf, int max_bytes)
868 {
869 	struct drm_device *dev = dev_get_drvdata(kdev);
870 	struct amdgpu_device *adev = drm_to_adev(dev);
871 	struct drm_connector *connector;
872 	struct drm_connector_list_iter conn_iter;
873 	struct amdgpu_dm_connector *aconnector;
874 	int ret = 0;
875 
876 	*enabled = false;
877 
878 	mutex_lock(&adev->dm.audio_lock);
879 
880 	drm_connector_list_iter_begin(dev, &conn_iter);
881 	drm_for_each_connector_iter(connector, &conn_iter) {
882 		aconnector = to_amdgpu_dm_connector(connector);
883 		if (aconnector->audio_inst != port)
884 			continue;
885 
886 		*enabled = true;
887 		ret = drm_eld_size(connector->eld);
888 		memcpy(buf, connector->eld, min(max_bytes, ret));
889 
890 		break;
891 	}
892 	drm_connector_list_iter_end(&conn_iter);
893 
894 	mutex_unlock(&adev->dm.audio_lock);
895 
896 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
897 
898 	return ret;
899 }
900 
901 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
902 	.get_eld = amdgpu_dm_audio_component_get_eld,
903 };
904 
905 static int amdgpu_dm_audio_component_bind(struct device *kdev,
906 				       struct device *hda_kdev, void *data)
907 {
908 	struct drm_device *dev = dev_get_drvdata(kdev);
909 	struct amdgpu_device *adev = drm_to_adev(dev);
910 	struct drm_audio_component *acomp = data;
911 
912 	acomp->ops = &amdgpu_dm_audio_component_ops;
913 	acomp->dev = kdev;
914 	adev->dm.audio_component = acomp;
915 
916 	return 0;
917 }
918 
919 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
920 					  struct device *hda_kdev, void *data)
921 {
922 	struct drm_device *dev = dev_get_drvdata(kdev);
923 	struct amdgpu_device *adev = drm_to_adev(dev);
924 	struct drm_audio_component *acomp = data;
925 
926 	acomp->ops = NULL;
927 	acomp->dev = NULL;
928 	adev->dm.audio_component = NULL;
929 }
930 
931 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
932 	.bind	= amdgpu_dm_audio_component_bind,
933 	.unbind	= amdgpu_dm_audio_component_unbind,
934 };
935 
936 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
937 {
938 	int i, ret;
939 
940 	if (!amdgpu_audio)
941 		return 0;
942 
943 	adev->mode_info.audio.enabled = true;
944 
945 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
946 
947 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
948 		adev->mode_info.audio.pin[i].channels = -1;
949 		adev->mode_info.audio.pin[i].rate = -1;
950 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
951 		adev->mode_info.audio.pin[i].status_bits = 0;
952 		adev->mode_info.audio.pin[i].category_code = 0;
953 		adev->mode_info.audio.pin[i].connected = false;
954 		adev->mode_info.audio.pin[i].id =
955 			adev->dm.dc->res_pool->audios[i]->inst;
956 		adev->mode_info.audio.pin[i].offset = 0;
957 	}
958 
959 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
960 	if (ret < 0)
961 		return ret;
962 
963 	adev->dm.audio_registered = true;
964 
965 	return 0;
966 }
967 
968 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
969 {
970 	if (!amdgpu_audio)
971 		return;
972 
973 	if (!adev->mode_info.audio.enabled)
974 		return;
975 
976 	if (adev->dm.audio_registered) {
977 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
978 		adev->dm.audio_registered = false;
979 	}
980 
981 	/* TODO: Disable audio? */
982 
983 	adev->mode_info.audio.enabled = false;
984 }
985 
986 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
987 {
988 	struct drm_audio_component *acomp = adev->dm.audio_component;
989 
990 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
991 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
992 
993 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
994 						 pin, -1);
995 	}
996 }
997 
998 static int dm_dmub_hw_init(struct amdgpu_device *adev)
999 {
1000 	const struct dmcub_firmware_header_v1_0 *hdr;
1001 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1002 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1003 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1004 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1005 	struct abm *abm = adev->dm.dc->res_pool->abm;
1006 	struct dmub_srv_hw_params hw_params;
1007 	enum dmub_status status;
1008 	const unsigned char *fw_inst_const, *fw_bss_data;
1009 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1010 	bool has_hw_support;
1011 
1012 	if (!dmub_srv)
1013 		/* DMUB isn't supported on the ASIC. */
1014 		return 0;
1015 
1016 	if (!fb_info) {
1017 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1018 		return -EINVAL;
1019 	}
1020 
1021 	if (!dmub_fw) {
1022 		/* Firmware required for DMUB support. */
1023 		DRM_ERROR("No firmware provided for DMUB.\n");
1024 		return -EINVAL;
1025 	}
1026 
1027 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1028 	if (status != DMUB_STATUS_OK) {
1029 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1030 		return -EINVAL;
1031 	}
1032 
1033 	if (!has_hw_support) {
1034 		DRM_INFO("DMUB unsupported on ASIC\n");
1035 		return 0;
1036 	}
1037 
1038 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1039 
1040 	fw_inst_const = dmub_fw->data +
1041 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1042 			PSP_HEADER_BYTES;
1043 
1044 	fw_bss_data = dmub_fw->data +
1045 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1046 		      le32_to_cpu(hdr->inst_const_bytes);
1047 
1048 	/* Copy firmware and bios info into FB memory. */
1049 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1050 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1051 
1052 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1053 
1054 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1055 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1056 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1057 	 * will be done by dm_dmub_hw_init
1058 	 */
1059 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1060 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1061 				fw_inst_const_size);
1062 	}
1063 
1064 	if (fw_bss_data_size)
1065 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1066 		       fw_bss_data, fw_bss_data_size);
1067 
1068 	/* Copy firmware bios info into FB memory. */
1069 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1070 	       adev->bios_size);
1071 
1072 	/* Reset regions that need to be reset. */
1073 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1074 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1075 
1076 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1077 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1078 
1079 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1080 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1081 
1082 	/* Initialize hardware. */
1083 	memset(&hw_params, 0, sizeof(hw_params));
1084 	hw_params.fb_base = adev->gmc.fb_start;
1085 	hw_params.fb_offset = adev->gmc.aper_base;
1086 
1087 	/* backdoor load firmware and trigger dmub running */
1088 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1089 		hw_params.load_inst_const = true;
1090 
1091 	if (dmcu)
1092 		hw_params.psp_version = dmcu->psp_version;
1093 
1094 	for (i = 0; i < fb_info->num_fb; ++i)
1095 		hw_params.fb[i] = &fb_info->fb[i];
1096 
1097 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1098 	if (status != DMUB_STATUS_OK) {
1099 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1100 		return -EINVAL;
1101 	}
1102 
1103 	/* Wait for firmware load to finish. */
1104 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1105 	if (status != DMUB_STATUS_OK)
1106 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1107 
1108 	/* Init DMCU and ABM if available. */
1109 	if (dmcu && abm) {
1110 		dmcu->funcs->dmcu_init(dmcu);
1111 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1112 	}
1113 
1114 	if (!adev->dm.dc->ctx->dmub_srv)
1115 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1116 	if (!adev->dm.dc->ctx->dmub_srv) {
1117 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1118 		return -ENOMEM;
1119 	}
1120 
1121 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1122 		 adev->dm.dmcub_fw_version);
1123 
1124 	return 0;
1125 }
1126 
1127 #if defined(CONFIG_DRM_AMD_DC_DCN)
1128 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1129 {
1130 	uint64_t pt_base;
1131 	uint32_t logical_addr_low;
1132 	uint32_t logical_addr_high;
1133 	uint32_t agp_base, agp_bot, agp_top;
1134 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1135 
1136 	memset(pa_config, 0, sizeof(*pa_config));
1137 
1138 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1139 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1140 
1141 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1142 		/*
1143 		 * Raven2 has a HW issue that it is unable to use the vram which
1144 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1145 		 * workaround that increases the system aperture high address (add 1)
1146 		 * to get rid of the VM fault and hardware hang.
1147 		 */
1148 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1149 	else
1150 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1151 
1152 	agp_base = 0;
1153 	agp_bot = adev->gmc.agp_start >> 24;
1154 	agp_top = adev->gmc.agp_end >> 24;
1155 
1156 
1157 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1158 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1159 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1160 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1161 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1162 	page_table_base.low_part = lower_32_bits(pt_base);
1163 
1164 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1165 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1166 
1167 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1168 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1169 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1170 
1171 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1172 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1173 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1174 
1175 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1176 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1177 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1178 
1179 	pa_config->is_hvm_enabled = 0;
1180 
1181 }
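/*
 * Note on the shift amounts used above: 1 << 18 = 256KB (system aperture
 * granularity), 1 << 24 = 16MB (AGP aperture granularity) and
 * 1 << 12 = 4KB (GART page granularity). The byte addresses from adev->gmc
 * are shifted down to those units and shifted back up when filling
 * pa_config, so the values handed to DC stay aligned to those units.
 */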
1182 #endif
1183 #if defined(CONFIG_DRM_AMD_DC_DCN)
1184 static void vblank_control_worker(struct work_struct *work)
1185 {
1186 	struct vblank_control_work *vblank_work =
1187 		container_of(work, struct vblank_control_work, work);
1188 	struct amdgpu_display_manager *dm = vblank_work->dm;
1189 
1190 	mutex_lock(&dm->dc_lock);
1191 
1192 	if (vblank_work->enable)
1193 		dm->active_vblank_irq_count++;
1194 	else if (dm->active_vblank_irq_count)
1195 		dm->active_vblank_irq_count--;
1196 
1197 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1198 
1199 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1200 
1201 	/* Control PSR based on vblank requirements from OS */
1202 	if (vblank_work->stream && vblank_work->stream->link) {
1203 		if (vblank_work->enable) {
1204 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1205 				amdgpu_dm_psr_disable(vblank_work->stream);
1206 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1207 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1208 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1209 			amdgpu_dm_psr_enable(vblank_work->stream);
1210 		}
1211 	}
1212 
1213 	mutex_unlock(&dm->dc_lock);
1214 
1215 	dc_stream_release(vblank_work->stream);
1216 
1217 	kfree(vblank_work);
1218 }
1219 
1220 #endif
1221 
1222 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1223 {
1224 	struct hpd_rx_irq_offload_work *offload_work;
1225 	struct amdgpu_dm_connector *aconnector;
1226 	struct dc_link *dc_link;
1227 	struct amdgpu_device *adev;
1228 	enum dc_connection_type new_connection_type = dc_connection_none;
1229 	unsigned long flags;
1230 
1231 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1232 	aconnector = offload_work->offload_wq->aconnector;
1233 
1234 	if (!aconnector) {
1235 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1236 		goto skip;
1237 	}
1238 
1239 	adev = drm_to_adev(aconnector->base.dev);
1240 	dc_link = aconnector->dc_link;
1241 
1242 	mutex_lock(&aconnector->hpd_lock);
1243 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1244 		DRM_ERROR("KMS: Failed to detect connector\n");
1245 	mutex_unlock(&aconnector->hpd_lock);
1246 
1247 	if (new_connection_type == dc_connection_none)
1248 		goto skip;
1249 
1250 	if (amdgpu_in_reset(adev))
1251 		goto skip;
1252 
1253 	mutex_lock(&adev->dm.dc_lock);
1254 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1255 		dc_link_dp_handle_automated_test(dc_link);
1256 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1257 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1258 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1259 		dc_link_dp_handle_link_loss(dc_link);
1260 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1261 		offload_work->offload_wq->is_handling_link_loss = false;
1262 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1263 	}
1264 	mutex_unlock(&adev->dm.dc_lock);
1265 
1266 skip:
1267 	kfree(offload_work);
1268 
1269 }
1270 
1271 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1272 {
1273 	int max_caps = dc->caps.max_links;
1274 	int i = 0;
1275 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1276 
1277 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1278 
1279 	if (!hpd_rx_offload_wq)
1280 		return NULL;
1281 
1282 
1283 	for (i = 0; i < max_caps; i++) {
1284 		hpd_rx_offload_wq[i].wq =
1285 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1286 
1287 		if (hpd_rx_offload_wq[i].wq == NULL) {
1288 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1289 			return NULL;
1290 		}
1291 
1292 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1293 	}
1294 
1295 	return hpd_rx_offload_wq;
1296 }
1297 
1298 struct amdgpu_stutter_quirk {
1299 	u16 chip_vendor;
1300 	u16 chip_device;
1301 	u16 subsys_vendor;
1302 	u16 subsys_device;
1303 	u8 revision;
1304 };
1305 
1306 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1307 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1308 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1309 	{ 0, 0, 0, 0, 0 },
1310 };
1311 
1312 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1313 {
1314 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1315 
1316 	while (p && p->chip_device != 0) {
1317 		if (pdev->vendor == p->chip_vendor &&
1318 		    pdev->device == p->chip_device &&
1319 		    pdev->subsystem_vendor == p->subsys_vendor &&
1320 		    pdev->subsystem_device == p->subsys_device &&
1321 		    pdev->revision == p->revision) {
1322 			return true;
1323 		}
1324 		++p;
1325 	}
1326 	return false;
1327 }
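/*
 * Adding a new stutter quirk is a matter of appending an entry before the
 * all-zero terminator in amdgpu_stutter_quirk_list above, e.g. (made-up
 * IDs, for illustration only):
 *
 *	{ 0x1002, 0x15d8, 0x103c, 0x8615, 0xc4 },
 *
 * dm_should_disable_stutter() walks the table until it reaches the
 * all-zero sentinel and disables stutter only on an exact
 * vendor/device/subsystem/revision match.
 */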
1328 
1329 static int amdgpu_dm_init(struct amdgpu_device *adev)
1330 {
1331 	struct dc_init_data init_data;
1332 #ifdef CONFIG_DRM_AMD_DC_HDCP
1333 	struct dc_callback_init init_params;
1334 #endif
1335 	int r;
1336 
1337 	adev->dm.ddev = adev_to_drm(adev);
1338 	adev->dm.adev = adev;
1339 
1340 	/* Zero all the fields */
1341 	memset(&init_data, 0, sizeof(init_data));
1342 #ifdef CONFIG_DRM_AMD_DC_HDCP
1343 	memset(&init_params, 0, sizeof(init_params));
1344 #endif
1345 
1346 	mutex_init(&adev->dm.dc_lock);
1347 	mutex_init(&adev->dm.audio_lock);
1348 #if defined(CONFIG_DRM_AMD_DC_DCN)
1349 	spin_lock_init(&adev->dm.vblank_lock);
1350 #endif
1351 
1352 	if(amdgpu_dm_irq_init(adev)) {
1353 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1354 		goto error;
1355 	}
1356 
1357 	init_data.asic_id.chip_family = adev->family;
1358 
1359 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1360 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1361 	init_data.asic_id.chip_id = adev->pdev->device;
1362 
1363 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1364 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1365 	init_data.asic_id.atombios_base_address =
1366 		adev->mode_info.atom_context->bios;
1367 
1368 	init_data.driver = adev;
1369 
1370 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1371 
1372 	if (!adev->dm.cgs_device) {
1373 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1374 		goto error;
1375 	}
1376 
1377 	init_data.cgs_device = adev->dm.cgs_device;
1378 
1379 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1380 
1381 	switch (adev->asic_type) {
1382 	case CHIP_CARRIZO:
1383 	case CHIP_STONEY:
1384 		init_data.flags.gpu_vm_support = true;
1385 		break;
1386 	default:
1387 		switch (adev->ip_versions[DCE_HWIP][0]) {
1388 		case IP_VERSION(2, 1, 0):
1389 			init_data.flags.gpu_vm_support = true;
1390 			init_data.flags.disable_dmcu = true;
1391 			break;
1392 		case IP_VERSION(1, 0, 0):
1393 		case IP_VERSION(1, 0, 1):
1394 		case IP_VERSION(3, 0, 1):
1395 		case IP_VERSION(3, 1, 2):
1396 		case IP_VERSION(3, 1, 3):
1397 			init_data.flags.gpu_vm_support = true;
1398 			break;
1399 		case IP_VERSION(2, 0, 3):
1400 			init_data.flags.disable_dmcu = true;
1401 			break;
1402 		default:
1403 			break;
1404 		}
1405 		break;
1406 	}
1407 
1408 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1409 		init_data.flags.fbc_support = true;
1410 
1411 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1412 		init_data.flags.multi_mon_pp_mclk_switch = true;
1413 
1414 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1415 		init_data.flags.disable_fractional_pwm = true;
1416 
1417 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1418 		init_data.flags.edp_no_power_sequencing = true;
1419 
1420 	init_data.flags.power_down_display_on_boot = true;
1421 
1422 	INIT_LIST_HEAD(&adev->dm.da_list);
1423 	/* Display Core create. */
1424 	adev->dm.dc = dc_create(&init_data);
1425 
1426 	if (adev->dm.dc) {
1427 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1428 	} else {
1429 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1430 		goto error;
1431 	}
1432 
1433 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1434 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1435 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1436 	}
1437 
1438 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1439 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1440 	if (dm_should_disable_stutter(adev->pdev))
1441 		adev->dm.dc->debug.disable_stutter = true;
1442 
1443 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1444 		adev->dm.dc->debug.disable_stutter = true;
1445 
1446 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1447 		adev->dm.dc->debug.disable_dsc = true;
1448 
1449 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1450 		adev->dm.dc->debug.disable_clock_gate = true;
1451 
1452 	r = dm_dmub_hw_init(adev);
1453 	if (r) {
1454 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1455 		goto error;
1456 	}
1457 
1458 	dc_hardware_init(adev->dm.dc);
1459 
1460 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1461 	if (!adev->dm.hpd_rx_offload_wq) {
1462 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1463 		goto error;
1464 	}
1465 
1466 #if defined(CONFIG_DRM_AMD_DC_DCN)
1467 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1468 		struct dc_phy_addr_space_config pa_config;
1469 
1470 		mmhub_read_system_context(adev, &pa_config);
1471 
1472 		// Call the DC init_memory func
1473 		dc_setup_system_context(adev->dm.dc, &pa_config);
1474 	}
1475 #endif
1476 
1477 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1478 	if (!adev->dm.freesync_module) {
1479 		DRM_ERROR(
1480 		"amdgpu: failed to initialize freesync_module.\n");
1481 	} else
1482 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1483 				adev->dm.freesync_module);
1484 
1485 	amdgpu_dm_init_color_mod();
1486 
1487 #if defined(CONFIG_DRM_AMD_DC_DCN)
1488 	if (adev->dm.dc->caps.max_links > 0) {
1489 		adev->dm.vblank_control_workqueue =
1490 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1491 		if (!adev->dm.vblank_control_workqueue)
1492 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1493 	}
1494 #endif
1495 
1496 #ifdef CONFIG_DRM_AMD_DC_HDCP
1497 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1498 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1499 
1500 		if (!adev->dm.hdcp_workqueue)
1501 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1502 		else
1503 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1504 
1505 		dc_init_callbacks(adev->dm.dc, &init_params);
1506 	}
1507 #endif
1508 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1509 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1510 #endif
1511 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1512 		init_completion(&adev->dm.dmub_aux_transfer_done);
1513 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1514 		if (!adev->dm.dmub_notify) {
1515 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1516 			goto error;
1517 		}
1518 
1519 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1520 		if (!adev->dm.delayed_hpd_wq) {
1521 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1522 			goto error;
1523 		}
1524 
1525 		amdgpu_dm_outbox_init(adev);
1526 #if defined(CONFIG_DRM_AMD_DC_DCN)
1527 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1528 			dmub_aux_setconfig_callback, false)) {
1529 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1530 			goto error;
1531 		}
1532 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1533 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1534 			goto error;
1535 		}
1536 #endif
1537 	}
1538 
1539 	if (amdgpu_dm_initialize_drm_device(adev)) {
1540 		DRM_ERROR(
1541 		"amdgpu: failed to initialize sw for display support.\n");
1542 		goto error;
1543 	}
1544 
1545 	/* create fake encoders for MST */
1546 	dm_dp_create_fake_mst_encoders(adev);
1547 
1548 	/* TODO: Add_display_info? */
1549 
1550 	/* TODO use dynamic cursor width */
1551 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1552 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1553 
1554 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1555 		DRM_ERROR(
1556 		"amdgpu: failed to initialize sw for display support.\n");
1557 		goto error;
1558 	}
1559 
1560 
1561 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1562 
1563 	return 0;
1564 error:
1565 	amdgpu_dm_fini(adev);
1566 
1567 	return -EINVAL;
1568 }
1569 
1570 static int amdgpu_dm_early_fini(void *handle)
1571 {
1572 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1573 
1574 	amdgpu_dm_audio_fini(adev);
1575 
1576 	return 0;
1577 }
1578 
1579 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1580 {
1581 	int i;
1582 
1583 #if defined(CONFIG_DRM_AMD_DC_DCN)
1584 	if (adev->dm.vblank_control_workqueue) {
1585 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1586 		adev->dm.vblank_control_workqueue = NULL;
1587 	}
1588 #endif
1589 
1590 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1591 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1592 	}
1593 
1594 	amdgpu_dm_destroy_drm_device(&adev->dm);
1595 
1596 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1597 	if (adev->dm.crc_rd_wrk) {
1598 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1599 		kfree(adev->dm.crc_rd_wrk);
1600 		adev->dm.crc_rd_wrk = NULL;
1601 	}
1602 #endif
1603 #ifdef CONFIG_DRM_AMD_DC_HDCP
1604 	if (adev->dm.hdcp_workqueue) {
1605 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1606 		adev->dm.hdcp_workqueue = NULL;
1607 	}
1608 
1609 	if (adev->dm.dc)
1610 		dc_deinit_callbacks(adev->dm.dc);
1611 #endif
1612 
1613 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1614 
1615 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1616 		kfree(adev->dm.dmub_notify);
1617 		adev->dm.dmub_notify = NULL;
1618 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1619 		adev->dm.delayed_hpd_wq = NULL;
1620 	}
1621 
1622 	if (adev->dm.dmub_bo)
1623 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1624 				      &adev->dm.dmub_bo_gpu_addr,
1625 				      &adev->dm.dmub_bo_cpu_addr);
1626 
1627 	if (adev->dm.hpd_rx_offload_wq) {
1628 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1629 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1630 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1631 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1632 			}
1633 		}
1634 
1635 		kfree(adev->dm.hpd_rx_offload_wq);
1636 		adev->dm.hpd_rx_offload_wq = NULL;
1637 	}
1638 
1639 	/* DC Destroy TODO: Replace destroy DAL */
1640 	if (adev->dm.dc)
1641 		dc_destroy(&adev->dm.dc);
1642 	/*
1643 	 * TODO: pageflip, vblank interrupt
1644 	 *
1645 	 * amdgpu_dm_irq_fini(adev);
1646 	 */
1647 
1648 	if (adev->dm.cgs_device) {
1649 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1650 		adev->dm.cgs_device = NULL;
1651 	}
1652 	if (adev->dm.freesync_module) {
1653 		mod_freesync_destroy(adev->dm.freesync_module);
1654 		adev->dm.freesync_module = NULL;
1655 	}
1656 
1657 	mutex_destroy(&adev->dm.audio_lock);
1658 	mutex_destroy(&adev->dm.dc_lock);
1659 
1660 	return;
1661 }
1662 
1663 static int load_dmcu_fw(struct amdgpu_device *adev)
1664 {
1665 	const char *fw_name_dmcu = NULL;
1666 	int r;
1667 	const struct dmcu_firmware_header_v1_0 *hdr;
1668 
	switch (adev->asic_type) {
1670 #if defined(CONFIG_DRM_AMD_DC_SI)
1671 	case CHIP_TAHITI:
1672 	case CHIP_PITCAIRN:
1673 	case CHIP_VERDE:
1674 	case CHIP_OLAND:
1675 #endif
1676 	case CHIP_BONAIRE:
1677 	case CHIP_HAWAII:
1678 	case CHIP_KAVERI:
1679 	case CHIP_KABINI:
1680 	case CHIP_MULLINS:
1681 	case CHIP_TONGA:
1682 	case CHIP_FIJI:
1683 	case CHIP_CARRIZO:
1684 	case CHIP_STONEY:
1685 	case CHIP_POLARIS11:
1686 	case CHIP_POLARIS10:
1687 	case CHIP_POLARIS12:
1688 	case CHIP_VEGAM:
1689 	case CHIP_VEGA10:
1690 	case CHIP_VEGA12:
1691 	case CHIP_VEGA20:
1692 		return 0;
1693 	case CHIP_NAVI12:
1694 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1695 		break;
1696 	case CHIP_RAVEN:
1697 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1698 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1699 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1700 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1701 		else
1702 			return 0;
1703 		break;
1704 	default:
1705 		switch (adev->ip_versions[DCE_HWIP][0]) {
1706 		case IP_VERSION(2, 0, 2):
1707 		case IP_VERSION(2, 0, 3):
1708 		case IP_VERSION(2, 0, 0):
1709 		case IP_VERSION(2, 1, 0):
1710 		case IP_VERSION(3, 0, 0):
1711 		case IP_VERSION(3, 0, 2):
1712 		case IP_VERSION(3, 0, 3):
1713 		case IP_VERSION(3, 0, 1):
1714 		case IP_VERSION(3, 1, 2):
1715 		case IP_VERSION(3, 1, 3):
1716 			return 0;
1717 		default:
1718 			break;
1719 		}
1720 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1721 		return -EINVAL;
1722 	}
1723 
1724 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1725 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1726 		return 0;
1727 	}
1728 
1729 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1730 	if (r == -ENOENT) {
1731 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1732 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1733 		adev->dm.fw_dmcu = NULL;
1734 		return 0;
1735 	}
1736 	if (r) {
1737 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1738 			fw_name_dmcu);
1739 		return r;
1740 	}
1741 
1742 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1743 	if (r) {
1744 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1745 			fw_name_dmcu);
1746 		release_firmware(adev->dm.fw_dmcu);
1747 		adev->dm.fw_dmcu = NULL;
1748 		return r;
1749 	}
1750 
1751 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1752 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1753 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1754 	adev->firmware.fw_size +=
1755 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1756 
1757 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1758 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1759 	adev->firmware.fw_size +=
1760 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1761 
1762 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1763 
1764 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1765 
1766 	return 0;
1767 }
1768 
1769 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1770 {
1771 	struct amdgpu_device *adev = ctx;
1772 
1773 	return dm_read_reg(adev->dm.dc->ctx, address);
1774 }
1775 
1776 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1777 				     uint32_t value)
1778 {
1779 	struct amdgpu_device *adev = ctx;
1780 
1781 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1782 }
1783 
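/*
 * dm_dmub_sw_init() - Software-side setup of the DMUB service.
 *
 * Picks the DMUB firmware and ASIC enum based on the DCE IP version,
 * requests and validates the firmware, creates the dmub_srv instance,
 * computes the memory regions it needs and backs them with a kernel BO in
 * VRAM, then fills in the framebuffer info used later during hardware init.
 */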
1784 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1785 {
1786 	struct dmub_srv_create_params create_params;
1787 	struct dmub_srv_region_params region_params;
1788 	struct dmub_srv_region_info region_info;
1789 	struct dmub_srv_fb_params fb_params;
1790 	struct dmub_srv_fb_info *fb_info;
1791 	struct dmub_srv *dmub_srv;
1792 	const struct dmcub_firmware_header_v1_0 *hdr;
1793 	const char *fw_name_dmub;
1794 	enum dmub_asic dmub_asic;
1795 	enum dmub_status status;
1796 	int r;
1797 
1798 	switch (adev->ip_versions[DCE_HWIP][0]) {
1799 	case IP_VERSION(2, 1, 0):
1800 		dmub_asic = DMUB_ASIC_DCN21;
1801 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1802 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1803 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1804 		break;
1805 	case IP_VERSION(3, 0, 0):
1806 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1807 			dmub_asic = DMUB_ASIC_DCN30;
1808 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1809 		} else {
1810 			dmub_asic = DMUB_ASIC_DCN30;
1811 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1812 		}
1813 		break;
1814 	case IP_VERSION(3, 0, 1):
1815 		dmub_asic = DMUB_ASIC_DCN301;
1816 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1817 		break;
1818 	case IP_VERSION(3, 0, 2):
1819 		dmub_asic = DMUB_ASIC_DCN302;
1820 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1821 		break;
1822 	case IP_VERSION(3, 0, 3):
1823 		dmub_asic = DMUB_ASIC_DCN303;
1824 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1825 		break;
1826 	case IP_VERSION(3, 1, 2):
1827 	case IP_VERSION(3, 1, 3):
1828 		dmub_asic = DMUB_ASIC_DCN31;
1829 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1830 		break;
1831 
1832 	default:
1833 		/* ASIC doesn't support DMUB. */
1834 		return 0;
1835 	}
1836 
1837 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1838 	if (r) {
1839 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1840 		return 0;
1841 	}
1842 
1843 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1844 	if (r) {
1845 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1846 		return 0;
1847 	}
1848 
1849 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1850 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1851 
1852 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1853 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1854 			AMDGPU_UCODE_ID_DMCUB;
1855 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1856 			adev->dm.dmub_fw;
1857 		adev->firmware.fw_size +=
1858 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1859 
1860 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1861 			 adev->dm.dmcub_fw_version);
1862 	}
1863 
1864 
1865 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1866 	dmub_srv = adev->dm.dmub_srv;
1867 
1868 	if (!dmub_srv) {
1869 		DRM_ERROR("Failed to allocate DMUB service!\n");
1870 		return -ENOMEM;
1871 	}
1872 
1873 	memset(&create_params, 0, sizeof(create_params));
1874 	create_params.user_ctx = adev;
1875 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1876 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1877 	create_params.asic = dmub_asic;
1878 
1879 	/* Create the DMUB service. */
1880 	status = dmub_srv_create(dmub_srv, &create_params);
1881 	if (status != DMUB_STATUS_OK) {
1882 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1883 		return -EINVAL;
1884 	}
1885 
1886 	/* Calculate the size of all the regions for the DMUB service. */
1887 	memset(&region_params, 0, sizeof(region_params));
1888 
1889 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1890 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1891 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1892 	region_params.vbios_size = adev->bios_size;
1893 	region_params.fw_bss_data = region_params.bss_data_size ?
1894 		adev->dm.dmub_fw->data +
1895 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1896 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1897 	region_params.fw_inst_const =
1898 		adev->dm.dmub_fw->data +
1899 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1900 		PSP_HEADER_BYTES;
1901 
1902 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1903 					   &region_info);
1904 
1905 	if (status != DMUB_STATUS_OK) {
1906 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1907 		return -EINVAL;
1908 	}
1909 
1910 	/*
1911 	 * Allocate a framebuffer based on the total size of all the regions.
1912 	 * TODO: Move this into GART.
1913 	 */
1914 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1915 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1916 				    &adev->dm.dmub_bo_gpu_addr,
1917 				    &adev->dm.dmub_bo_cpu_addr);
1918 	if (r)
1919 		return r;
1920 
1921 	/* Rebase the regions on the framebuffer address. */
1922 	memset(&fb_params, 0, sizeof(fb_params));
1923 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1924 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1925 	fb_params.region_info = &region_info;
1926 
1927 	adev->dm.dmub_fb_info =
1928 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1929 	fb_info = adev->dm.dmub_fb_info;
1930 
1931 	if (!fb_info) {
1932 		DRM_ERROR(
1933 			"Failed to allocate framebuffer info for DMUB service!\n");
1934 		return -ENOMEM;
1935 	}
1936 
1937 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1938 	if (status != DMUB_STATUS_OK) {
1939 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1940 		return -EINVAL;
1941 	}
1942 
1943 	return 0;
1944 }
1945 
1946 static int dm_sw_init(void *handle)
1947 {
1948 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1949 	int r;
1950 
1951 	r = dm_dmub_sw_init(adev);
1952 	if (r)
1953 		return r;
1954 
1955 	return load_dmcu_fw(adev);
1956 }
1957 
1958 static int dm_sw_fini(void *handle)
1959 {
1960 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1961 
1962 	kfree(adev->dm.dmub_fb_info);
1963 	adev->dm.dmub_fb_info = NULL;
1964 
1965 	if (adev->dm.dmub_srv) {
1966 		dmub_srv_destroy(adev->dm.dmub_srv);
1967 		adev->dm.dmub_srv = NULL;
1968 	}
1969 
1970 	release_firmware(adev->dm.dmub_fw);
1971 	adev->dm.dmub_fw = NULL;
1972 
1973 	release_firmware(adev->dm.fw_dmcu);
1974 	adev->dm.fw_dmcu = NULL;
1975 
1976 	return 0;
1977 }
1978 
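/*
 * Walk all connectors and start MST topology management on every link that
 * detected an MST branch device. If enabling MST fails, the link is demoted
 * to a single-stream (SST) connection.
 */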
1979 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1980 {
1981 	struct amdgpu_dm_connector *aconnector;
1982 	struct drm_connector *connector;
1983 	struct drm_connector_list_iter iter;
1984 	int ret = 0;
1985 
1986 	drm_connector_list_iter_begin(dev, &iter);
1987 	drm_for_each_connector_iter(connector, &iter) {
1988 		aconnector = to_amdgpu_dm_connector(connector);
1989 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1990 		    aconnector->mst_mgr.aux) {
1991 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1992 					 aconnector,
1993 					 aconnector->base.base.id);
1994 
1995 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1996 			if (ret < 0) {
1997 				DRM_ERROR("DM_MST: Failed to start MST\n");
1998 				aconnector->dc_link->type =
1999 					dc_connection_single;
2000 				break;
2001 			}
2002 		}
2003 	}
2004 	drm_connector_list_iter_end(&iter);
2005 
2006 	return ret;
2007 }
2008 
2009 static int dm_late_init(void *handle)
2010 {
2011 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2012 
2013 	struct dmcu_iram_parameters params;
2014 	unsigned int linear_lut[16];
2015 	int i;
2016 	struct dmcu *dmcu = NULL;
2017 
2018 	dmcu = adev->dm.dc->res_pool->dmcu;
2019 
2020 	for (i = 0; i < 16; i++)
2021 		linear_lut[i] = 0xFFFF * i / 15;
2022 
2023 	params.set = 0;
2024 	params.backlight_ramping_override = false;
2025 	params.backlight_ramping_start = 0xCCCC;
2026 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2027 	params.backlight_lut_array_size = 16;
2028 	params.backlight_lut_array = linear_lut;
2029 
	/* Min backlight level after ABM reduction; don't allow below 1%.
	 * 0xFFFF * 0.01 = 0x28F
2032 	 */
2033 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on dmcub,
	 * the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
2038 	if (dmcu) {
2039 		if (!dmcu_load_iram(dmcu, params))
2040 			return -EINVAL;
2041 	} else if (adev->dm.dc->ctx->dmub_srv) {
2042 		struct dc_link *edp_links[MAX_NUM_EDP];
2043 		int edp_num;
2044 
2045 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2046 		for (i = 0; i < edp_num; i++) {
2047 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2048 				return -EINVAL;
2049 		}
2050 	}
2051 
2052 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2053 }
2054 
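/*
 * Suspend or resume the MST topology managers of all root MST connectors
 * around S3. If a manager fails to resume, MST is torn down on that link and
 * a hotplug event is generated so userspace can re-probe.
 */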
2055 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2056 {
2057 	struct amdgpu_dm_connector *aconnector;
2058 	struct drm_connector *connector;
2059 	struct drm_connector_list_iter iter;
2060 	struct drm_dp_mst_topology_mgr *mgr;
2061 	int ret;
2062 	bool need_hotplug = false;
2063 
2064 	drm_connector_list_iter_begin(dev, &iter);
2065 	drm_for_each_connector_iter(connector, &iter) {
2066 		aconnector = to_amdgpu_dm_connector(connector);
2067 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2068 		    aconnector->mst_port)
2069 			continue;
2070 
2071 		mgr = &aconnector->mst_mgr;
2072 
2073 		if (suspend) {
2074 			drm_dp_mst_topology_mgr_suspend(mgr);
2075 		} else {
2076 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2077 			if (ret < 0) {
2078 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2079 				need_hotplug = true;
2080 			}
2081 		}
2082 	}
2083 	drm_connector_list_iter_end(&iter);
2084 
2085 	if (need_hotplug)
2086 		drm_kms_helper_hotplug_event(dev);
2087 }
2088 
2089 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2090 {
2091 	struct smu_context *smu = &adev->smu;
2092 	int ret = 0;
2093 
2094 	if (!is_support_sw_smu(adev))
2095 		return 0;
2096 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
	 * should be passed to smu during boot up and resume from s3.
	 * Boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct, then calls the pplib functions below to
	 * pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
2127 	switch (adev->ip_versions[DCE_HWIP][0]) {
2128 	case IP_VERSION(2, 0, 2):
2129 	case IP_VERSION(2, 0, 0):
2130 		break;
2131 	default:
2132 		return 0;
2133 	}
2134 
2135 	ret = smu_write_watermarks_table(smu);
2136 	if (ret) {
2137 		DRM_ERROR("Failed to update WMTABLE!\n");
2138 		return ret;
2139 	}
2140 
2141 	return 0;
2142 }
2143 
2144 /**
2145  * dm_hw_init() - Initialize DC device
2146  * @handle: The base driver device containing the amdgpu_dm device.
2147  *
2148  * Initialize the &struct amdgpu_display_manager device. This involves calling
2149  * the initializers of each DM component, then populating the struct with them.
2150  *
2151  * Although the function implies hardware initialization, both hardware and
2152  * software are initialized here. Splitting them out to their relevant init
2153  * hooks is a future TODO item.
2154  *
2155  * Some notable things that are initialized here:
2156  *
2157  * - Display Core, both software and hardware
2158  * - DC modules that we need (freesync and color management)
2159  * - DRM software states
2160  * - Interrupt sources and handlers
2161  * - Vblank support
2162  * - Debug FS entries, if enabled
2163  */
2164 static int dm_hw_init(void *handle)
2165 {
2166 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2167 	/* Create DAL display manager */
2168 	amdgpu_dm_init(adev);
2169 	amdgpu_dm_hpd_init(adev);
2170 
2171 	return 0;
2172 }
2173 
2174 /**
2175  * dm_hw_fini() - Teardown DC device
2176  * @handle: The base driver device containing the amdgpu_dm device.
2177  *
2178  * Teardown components within &struct amdgpu_display_manager that require
2179  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2180  * were loaded. Also flush IRQ workqueues and disable them.
2181  */
2182 static int dm_hw_fini(void *handle)
2183 {
2184 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2185 
2186 	amdgpu_dm_hpd_fini(adev);
2187 
2188 	amdgpu_dm_irq_fini(adev);
2189 	amdgpu_dm_fini(adev);
2190 	return 0;
2191 }
2192 
2193 
2194 static int dm_enable_vblank(struct drm_crtc *crtc);
2195 static void dm_disable_vblank(struct drm_crtc *crtc);
2196 
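/*
 * Enable or disable the pageflip and vblank interrupts for every CRTC that
 * drives a stream with planes in the given DC state. Used to quiesce and
 * restore interrupts around a GPU reset.
 */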
2197 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2198 				 struct dc_state *state, bool enable)
2199 {
2200 	enum dc_irq_source irq_source;
2201 	struct amdgpu_crtc *acrtc;
2202 	int rc = -EBUSY;
2203 	int i = 0;
2204 
2205 	for (i = 0; i < state->stream_count; i++) {
2206 		acrtc = get_crtc_by_otg_inst(
2207 				adev, state->stream_status[i].primary_otg_inst);
2208 
2209 		if (acrtc && state->stream_status[i].plane_count != 0) {
2210 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2211 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2214 			if (rc)
2215 				DRM_WARN("Failed to %s pflip interrupts\n",
2216 					 enable ? "enable" : "disable");
2217 
2218 			if (enable) {
2219 				rc = dm_enable_vblank(&acrtc->base);
2220 				if (rc)
2221 					DRM_WARN("Failed to enable vblank interrupts\n");
2222 			} else {
2223 				dm_disable_vblank(&acrtc->base);
2224 			}
2225 
2226 		}
2227 	}
2228 
2229 }
2230 
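/*
 * Commit an empty configuration: copy the current DC state, strip all planes
 * and streams from it, validate the result and commit it so no streams are
 * left enabled. Used when suspending during a GPU reset.
 */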
2231 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2232 {
2233 	struct dc_state *context = NULL;
2234 	enum dc_status res = DC_ERROR_UNEXPECTED;
2235 	int i;
2236 	struct dc_stream_state *del_streams[MAX_PIPES];
2237 	int del_streams_count = 0;
2238 
2239 	memset(del_streams, 0, sizeof(del_streams));
2240 
2241 	context = dc_create_state(dc);
2242 	if (context == NULL)
2243 		goto context_alloc_fail;
2244 
2245 	dc_resource_state_copy_construct_current(dc, context);
2246 
	/* First, remove all streams from the context */
2248 	for (i = 0; i < context->stream_count; i++) {
2249 		struct dc_stream_state *stream = context->streams[i];
2250 
2251 		del_streams[del_streams_count++] = stream;
2252 	}
2253 
2254 	/* Remove all planes for removed streams and then remove the streams */
2255 	for (i = 0; i < del_streams_count; i++) {
2256 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2257 			res = DC_FAIL_DETACH_SURFACES;
2258 			goto fail;
2259 		}
2260 
2261 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2262 		if (res != DC_OK)
2263 			goto fail;
2264 	}
2265 
2266 
2267 	res = dc_validate_global_state(dc, context, false);
2268 
2269 	if (res != DC_OK) {
2270 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2271 		goto fail;
2272 	}
2273 
2274 	res = dc_commit_state(dc, context);
2275 
2276 fail:
2277 	dc_release_state(context);
2278 
2279 context_alloc_fail:
2280 	return res;
2281 }
2282 
2283 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2284 {
2285 	int i;
2286 
2287 	if (dm->hpd_rx_offload_wq) {
2288 		for (i = 0; i < dm->dc->caps.max_links; i++)
2289 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2290 	}
2291 }
2292 
2293 static int dm_suspend(void *handle)
2294 {
2295 	struct amdgpu_device *adev = handle;
2296 	struct amdgpu_display_manager *dm = &adev->dm;
2297 	int ret = 0;
2298 
2299 	if (amdgpu_in_reset(adev)) {
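		/*
		 * Note: dc_lock is taken here and intentionally not released;
		 * the matching unlock happens in dm_resume() once the cached
		 * DC state has been recommitted after the GPU reset.
		 */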
2300 		mutex_lock(&dm->dc_lock);
2301 
2302 #if defined(CONFIG_DRM_AMD_DC_DCN)
2303 		dc_allow_idle_optimizations(adev->dm.dc, false);
2304 #endif
2305 
2306 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2307 
2308 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2309 
2310 		amdgpu_dm_commit_zero_streams(dm->dc);
2311 
2312 		amdgpu_dm_irq_suspend(adev);
2313 
2314 		hpd_rx_irq_work_suspend(dm);
2315 
2316 		return ret;
2317 	}
2318 
2319 	WARN_ON(adev->dm.cached_state);
2320 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2321 
2322 	s3_handle_mst(adev_to_drm(adev), true);
2323 
2324 	amdgpu_dm_irq_suspend(adev);
2325 
2326 	hpd_rx_irq_work_suspend(dm);
2327 
2328 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2329 
2330 	return 0;
2331 }
2332 
2333 static struct amdgpu_dm_connector *
2334 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2335 					     struct drm_crtc *crtc)
2336 {
2337 	uint32_t i;
2338 	struct drm_connector_state *new_con_state;
2339 	struct drm_connector *connector;
2340 	struct drm_crtc *crtc_from_state;
2341 
2342 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2343 		crtc_from_state = new_con_state->crtc;
2344 
2345 		if (crtc_from_state == crtc)
2346 			return to_amdgpu_dm_connector(connector);
2347 	}
2348 
2349 	return NULL;
2350 }
2351 
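/*
 * Fake a successful detection for a connector that is forced on but whose
 * physical detection reported nothing: create a sink matching the connector
 * signal type, attach it to the link and try to read the local EDID into it.
 */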
2352 static void emulated_link_detect(struct dc_link *link)
2353 {
2354 	struct dc_sink_init_data sink_init_data = { 0 };
2355 	struct display_sink_capability sink_caps = { 0 };
2356 	enum dc_edid_status edid_status;
2357 	struct dc_context *dc_ctx = link->ctx;
2358 	struct dc_sink *sink = NULL;
2359 	struct dc_sink *prev_sink = NULL;
2360 
2361 	link->type = dc_connection_none;
2362 	prev_sink = link->local_sink;
2363 
2364 	if (prev_sink)
2365 		dc_sink_release(prev_sink);
2366 
2367 	switch (link->connector_signal) {
2368 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2369 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2370 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2371 		break;
2372 	}
2373 
2374 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2375 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2376 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2377 		break;
2378 	}
2379 
2380 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2381 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2382 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2383 		break;
2384 	}
2385 
2386 	case SIGNAL_TYPE_LVDS: {
2387 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2388 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2389 		break;
2390 	}
2391 
2392 	case SIGNAL_TYPE_EDP: {
2393 		sink_caps.transaction_type =
2394 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2395 		sink_caps.signal = SIGNAL_TYPE_EDP;
2396 		break;
2397 	}
2398 
2399 	case SIGNAL_TYPE_DISPLAY_PORT: {
2400 		sink_caps.transaction_type =
2401 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2402 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2403 		break;
2404 	}
2405 
2406 	default:
2407 		DC_ERROR("Invalid connector type! signal:%d\n",
2408 			link->connector_signal);
2409 		return;
2410 	}
2411 
2412 	sink_init_data.link = link;
2413 	sink_init_data.sink_signal = sink_caps.signal;
2414 
2415 	sink = dc_sink_create(&sink_init_data);
2416 	if (!sink) {
2417 		DC_ERROR("Failed to create sink!\n");
2418 		return;
2419 	}
2420 
2421 	/* dc_sink_create returns a new reference */
2422 	link->local_sink = sink;
2423 
2424 	edid_status = dm_helpers_read_local_edid(
2425 			link->ctx,
2426 			link,
2427 			sink);
2428 
2429 	if (edid_status != EDID_OK)
2430 		DC_ERROR("Failed to read EDID");
2431 
2432 }
2433 
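/*
 * After a GPU reset, replay full updates for every stream in the cached DC
 * state: each plane is flagged for a forced full update and committed again
 * so the hardware matches the pre-reset configuration.
 */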
2434 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2435 				     struct amdgpu_display_manager *dm)
2436 {
2437 	struct {
2438 		struct dc_surface_update surface_updates[MAX_SURFACES];
2439 		struct dc_plane_info plane_infos[MAX_SURFACES];
2440 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2441 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2442 		struct dc_stream_update stream_update;
	} *bundle;
2444 	int k, m;
2445 
2446 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2447 
2448 	if (!bundle) {
2449 		dm_error("Failed to allocate update bundle\n");
2450 		goto cleanup;
2451 	}
2452 
2453 	for (k = 0; k < dc_state->stream_count; k++) {
2454 		bundle->stream_update.stream = dc_state->streams[k];
2455 
2456 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2457 			bundle->surface_updates[m].surface =
2458 				dc_state->stream_status->plane_states[m];
2459 			bundle->surface_updates[m].surface->force_full_update =
2460 				true;
2461 		}
2462 		dc_commit_updates_for_stream(
2463 			dm->dc, bundle->surface_updates,
2464 			dc_state->stream_status->plane_count,
2465 			dc_state->streams[k], &bundle->stream_update, dc_state);
2466 	}
2467 
2468 cleanup:
2469 	kfree(bundle);
2470 
2471 	return;
2472 }
2473 
2474 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2475 {
2476 	struct dc_stream_state *stream_state;
2477 	struct amdgpu_dm_connector *aconnector = link->priv;
2478 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2479 	struct dc_stream_update stream_update;
2480 	bool dpms_off = true;
2481 
2482 	memset(&stream_update, 0, sizeof(stream_update));
2483 	stream_update.dpms_off = &dpms_off;
2484 
2485 	mutex_lock(&adev->dm.dc_lock);
2486 	stream_state = dc_stream_find_from_link(link);
2487 
2488 	if (stream_state == NULL) {
2489 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2490 		mutex_unlock(&adev->dm.dc_lock);
2491 		return;
2492 	}
2493 
2494 	stream_update.stream = stream_state;
2495 	acrtc_state->force_dpms_off = true;
2496 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2497 				     stream_state, &stream_update,
2498 				     stream_state->ctx->dc->current_state);
2499 	mutex_unlock(&adev->dm.dc_lock);
2500 }
2501 
2502 static int dm_resume(void *handle)
2503 {
2504 	struct amdgpu_device *adev = handle;
2505 	struct drm_device *ddev = adev_to_drm(adev);
2506 	struct amdgpu_display_manager *dm = &adev->dm;
2507 	struct amdgpu_dm_connector *aconnector;
2508 	struct drm_connector *connector;
2509 	struct drm_connector_list_iter iter;
2510 	struct drm_crtc *crtc;
2511 	struct drm_crtc_state *new_crtc_state;
2512 	struct dm_crtc_state *dm_new_crtc_state;
2513 	struct drm_plane *plane;
2514 	struct drm_plane_state *new_plane_state;
2515 	struct dm_plane_state *dm_new_plane_state;
2516 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2517 	enum dc_connection_type new_connection_type = dc_connection_none;
2518 	struct dc_state *dc_state;
2519 	int i, r, j;
2520 
2521 	if (amdgpu_in_reset(adev)) {
2522 		dc_state = dm->cached_dc_state;
2523 
2524 		r = dm_dmub_hw_init(adev);
2525 		if (r)
2526 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2527 
2528 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2529 		dc_resume(dm->dc);
2530 
2531 		amdgpu_dm_irq_resume_early(adev);
2532 
2533 		for (i = 0; i < dc_state->stream_count; i++) {
2534 			dc_state->streams[i]->mode_changed = true;
2535 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2536 				dc_state->stream_status->plane_states[j]->update_flags.raw
2537 					= 0xffffffff;
2538 			}
2539 		}
2540 #if defined(CONFIG_DRM_AMD_DC_DCN)
2541 		/*
2542 		 * Resource allocation happens for link encoders for newer ASIC in
2543 		 * dc_validate_global_state, so we need to revalidate it.
2544 		 *
2545 		 * This shouldn't fail (it passed once before), so warn if it does.
2546 		 */
2547 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2548 #endif
2549 
2550 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2551 
2552 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2553 
2554 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2555 
2556 		dc_release_state(dm->cached_dc_state);
2557 		dm->cached_dc_state = NULL;
2558 
2559 		amdgpu_dm_irq_resume_late(adev);
2560 
2561 		mutex_unlock(&dm->dc_lock);
2562 
2563 		return 0;
2564 	}
2565 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2566 	dc_release_state(dm_state->context);
2567 	dm_state->context = dc_create_state(dm->dc);
2568 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2569 	dc_resource_state_construct(dm->dc, dm_state->context);
2570 
2571 	/* Before powering on DC we need to re-initialize DMUB. */
2572 	r = dm_dmub_hw_init(adev);
2573 	if (r)
2574 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2575 
2576 	/* power on hardware */
2577 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2578 
2579 	/* program HPD filter */
2580 	dc_resume(dm->dc);
2581 
2582 	/*
	 * Enable HPD Rx IRQ early; this should be done before set mode, as
	 * short pulse interrupts are used for MST
2585 	 */
2586 	amdgpu_dm_irq_resume_early(adev);
2587 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2589 	s3_handle_mst(ddev, false);
2590 
	/* Do detection */
2592 	drm_connector_list_iter_begin(ddev, &iter);
2593 	drm_for_each_connector_iter(connector, &iter) {
2594 		aconnector = to_amdgpu_dm_connector(connector);
2595 
2596 		/*
2597 		 * this is the case when traversing through already created
2598 		 * MST connectors, should be skipped
2599 		 */
2600 		if (aconnector->mst_port)
2601 			continue;
2602 
2603 		mutex_lock(&aconnector->hpd_lock);
2604 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2605 			DRM_ERROR("KMS: Failed to detect connector\n");
2606 
2607 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2608 			emulated_link_detect(aconnector->dc_link);
2609 		else
2610 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2611 
2612 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2613 			aconnector->fake_enable = false;
2614 
2615 		if (aconnector->dc_sink)
2616 			dc_sink_release(aconnector->dc_sink);
2617 		aconnector->dc_sink = NULL;
2618 		amdgpu_dm_update_connector_after_detect(aconnector);
2619 		mutex_unlock(&aconnector->hpd_lock);
2620 	}
2621 	drm_connector_list_iter_end(&iter);
2622 
2623 	/* Force mode set in atomic commit */
2624 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2625 		new_crtc_state->active_changed = true;
2626 
2627 	/*
2628 	 * atomic_check is expected to create the dc states. We need to release
2629 	 * them here, since they were duplicated as part of the suspend
2630 	 * procedure.
2631 	 */
2632 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2633 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2634 		if (dm_new_crtc_state->stream) {
2635 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2636 			dc_stream_release(dm_new_crtc_state->stream);
2637 			dm_new_crtc_state->stream = NULL;
2638 		}
2639 	}
2640 
2641 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2642 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2643 		if (dm_new_plane_state->dc_state) {
2644 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2645 			dc_plane_state_release(dm_new_plane_state->dc_state);
2646 			dm_new_plane_state->dc_state = NULL;
2647 		}
2648 	}
2649 
2650 	drm_atomic_helper_resume(ddev, dm->cached_state);
2651 
2652 	dm->cached_state = NULL;
2653 
2654 	amdgpu_dm_irq_resume_late(adev);
2655 
2656 	amdgpu_dm_smu_write_watermarks_table(adev);
2657 
2658 	return 0;
2659 }
2660 
2661 /**
2662  * DOC: DM Lifecycle
2663  *
2664  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2665  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2666  * the base driver's device list to be initialized and torn down accordingly.
2667  *
2668  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2669  */
2670 
2671 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2672 	.name = "dm",
2673 	.early_init = dm_early_init,
2674 	.late_init = dm_late_init,
2675 	.sw_init = dm_sw_init,
2676 	.sw_fini = dm_sw_fini,
2677 	.early_fini = amdgpu_dm_early_fini,
2678 	.hw_init = dm_hw_init,
2679 	.hw_fini = dm_hw_fini,
2680 	.suspend = dm_suspend,
2681 	.resume = dm_resume,
2682 	.is_idle = dm_is_idle,
2683 	.wait_for_idle = dm_wait_for_idle,
2684 	.check_soft_reset = dm_check_soft_reset,
2685 	.soft_reset = dm_soft_reset,
2686 	.set_clockgating_state = dm_set_clockgating_state,
2687 	.set_powergating_state = dm_set_powergating_state,
2688 };
2689 
2690 const struct amdgpu_ip_block_version dm_ip_block =
2691 {
2692 	.type = AMD_IP_BLOCK_TYPE_DCE,
2693 	.major = 1,
2694 	.minor = 0,
2695 	.rev = 0,
2696 	.funcs = &amdgpu_dm_funcs,
2697 };
2698 
2699 
2700 /**
2701  * DOC: atomic
2702  *
2703  * *WIP*
2704  */
2705 
2706 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2707 	.fb_create = amdgpu_display_user_framebuffer_create,
2708 	.get_format_info = amd_get_format_info,
2709 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2710 	.atomic_check = amdgpu_dm_atomic_check,
2711 	.atomic_commit = drm_atomic_helper_commit,
2712 };
2713 
2714 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2715 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2716 };
2717 
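/*
 * Derive the backlight capabilities of an eDP panel: whether AUX (DPCD)
 * backlight control is used (OLED panels, or forced via the amdgpu_backlight
 * module parameter) and the min/max input signal computed from the HDR
 * metadata max/min content light level values.
 */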
2718 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2719 {
2720 	u32 max_cll, min_cll, max, min, q, r;
2721 	struct amdgpu_dm_backlight_caps *caps;
2722 	struct amdgpu_display_manager *dm;
2723 	struct drm_connector *conn_base;
2724 	struct amdgpu_device *adev;
2725 	struct dc_link *link = NULL;
2726 	static const u8 pre_computed_values[] = {
2727 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2728 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2729 	int i;
2730 
2731 	if (!aconnector || !aconnector->dc_link)
2732 		return;
2733 
2734 	link = aconnector->dc_link;
2735 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2736 		return;
2737 
2738 	conn_base = &aconnector->base;
2739 	adev = drm_to_adev(conn_base->dev);
2740 	dm = &adev->dm;
2741 	for (i = 0; i < dm->num_of_edps; i++) {
2742 		if (link == dm->backlight_link[i])
2743 			break;
2744 	}
2745 	if (i >= dm->num_of_edps)
2746 		return;
2747 	caps = &dm->backlight_caps[i];
2748 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2749 	caps->aux_support = false;
2750 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2751 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2752 
2753 	if (caps->ext_caps->bits.oled == 1 /*||
2754 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2755 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2756 		caps->aux_support = true;
2757 
2758 	if (amdgpu_backlight == 0)
2759 		caps->aux_support = false;
2760 	else if (amdgpu_backlight == 1)
2761 		caps->aux_support = true;
2762 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * For calculating this expression we may need floating point precision;
	 * to avoid this complexity, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, we know that
	 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified at
	 * pre_computed_values.
	 */
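	/*
	 * Illustrative example: max_cll = 65 gives q = 2 and r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, which
	 * matches 50 * 2**(65/32) ~= 204.
	 */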
2778 	q = max_cll >> 5;
2779 	r = max_cll % 32;
2780 	max = (1 << q) * pre_computed_values[r];
2781 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
2783 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2784 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2785 
2786 	caps->aux_max_input_signal = max;
2787 	caps->aux_min_input_signal = min;
2788 }
2789 
2790 void amdgpu_dm_update_connector_after_detect(
2791 		struct amdgpu_dm_connector *aconnector)
2792 {
2793 	struct drm_connector *connector = &aconnector->base;
2794 	struct drm_device *dev = connector->dev;
2795 	struct dc_sink *sink;
2796 
2797 	/* MST handled by drm_mst framework */
2798 	if (aconnector->mst_mgr.mst_state == true)
2799 		return;
2800 
2801 	sink = aconnector->dc_link->local_sink;
2802 	if (sink)
2803 		dc_sink_retain(sink);
2804 
2805 	/*
2806 	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on link status.
2808 	 * Skip if already done during boot.
2809 	 */
2810 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2811 			&& aconnector->dc_em_sink) {
2812 
2813 		/*
		 * For S3 resume with headless use em_sink to fake the stream
		 * because on resume connector->sink is set to NULL
2816 		 */
2817 		mutex_lock(&dev->mode_config.mutex);
2818 
2819 		if (sink) {
2820 			if (aconnector->dc_sink) {
2821 				amdgpu_dm_update_freesync_caps(connector, NULL);
2822 				/*
				 * retain and release below are used to
				 * bump up the refcount for the sink because the link doesn't
				 * point to it anymore after disconnect, so on the next crtc to
				 * connector reshuffle by UMD we would otherwise get an
				 * unwanted dc_sink release
2827 				 */
2828 				dc_sink_release(aconnector->dc_sink);
2829 			}
2830 			aconnector->dc_sink = sink;
2831 			dc_sink_retain(aconnector->dc_sink);
2832 			amdgpu_dm_update_freesync_caps(connector,
2833 					aconnector->edid);
2834 		} else {
2835 			amdgpu_dm_update_freesync_caps(connector, NULL);
2836 			if (!aconnector->dc_sink) {
2837 				aconnector->dc_sink = aconnector->dc_em_sink;
2838 				dc_sink_retain(aconnector->dc_sink);
2839 			}
2840 		}
2841 
2842 		mutex_unlock(&dev->mode_config.mutex);
2843 
2844 		if (sink)
2845 			dc_sink_release(sink);
2846 		return;
2847 	}
2848 
2849 	/*
2850 	 * TODO: temporary guard to look for proper fix
2851 	 * if this sink is MST sink, we should not do anything
2852 	 */
2853 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2854 		dc_sink_release(sink);
2855 		return;
2856 	}
2857 
2858 	if (aconnector->dc_sink == sink) {
2859 		/*
2860 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2861 		 * Do nothing!!
2862 		 */
2863 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2864 				aconnector->connector_id);
2865 		if (sink)
2866 			dc_sink_release(sink);
2867 		return;
2868 	}
2869 
2870 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2871 		aconnector->connector_id, aconnector->dc_sink, sink);
2872 
2873 	mutex_lock(&dev->mode_config.mutex);
2874 
2875 	/*
2876 	 * 1. Update status of the drm connector
2877 	 * 2. Send an event and let userspace tell us what to do
2878 	 */
2879 	if (sink) {
2880 		/*
2881 		 * TODO: check if we still need the S3 mode update workaround.
2882 		 * If yes, put it here.
2883 		 */
2884 		if (aconnector->dc_sink) {
2885 			amdgpu_dm_update_freesync_caps(connector, NULL);
2886 			dc_sink_release(aconnector->dc_sink);
2887 		}
2888 
2889 		aconnector->dc_sink = sink;
2890 		dc_sink_retain(aconnector->dc_sink);
2891 		if (sink->dc_edid.length == 0) {
2892 			aconnector->edid = NULL;
2893 			if (aconnector->dc_link->aux_mode) {
2894 				drm_dp_cec_unset_edid(
2895 					&aconnector->dm_dp_aux.aux);
2896 			}
2897 		} else {
2898 			aconnector->edid =
2899 				(struct edid *)sink->dc_edid.raw_edid;
2900 
2901 			drm_connector_update_edid_property(connector,
2902 							   aconnector->edid);
2903 			if (aconnector->dc_link->aux_mode)
2904 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2905 						    aconnector->edid);
2906 		}
2907 
2908 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2909 		update_connector_ext_caps(aconnector);
2910 	} else {
2911 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2912 		amdgpu_dm_update_freesync_caps(connector, NULL);
2913 		drm_connector_update_edid_property(connector, NULL);
2914 		aconnector->num_modes = 0;
2915 		dc_sink_release(aconnector->dc_sink);
2916 		aconnector->dc_sink = NULL;
2917 		aconnector->edid = NULL;
2918 #ifdef CONFIG_DRM_AMD_DC_HDCP
2919 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2920 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2921 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2922 #endif
2923 	}
2924 
2925 	mutex_unlock(&dev->mode_config.mutex);
2926 
2927 	update_subconnector_property(aconnector);
2928 
2929 	if (sink)
2930 		dc_sink_release(sink);
2931 }
2932 
2933 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2934 {
2935 	struct drm_connector *connector = &aconnector->base;
2936 	struct drm_device *dev = connector->dev;
2937 	enum dc_connection_type new_connection_type = dc_connection_none;
2938 	struct amdgpu_device *adev = drm_to_adev(dev);
2939 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2940 	struct dm_crtc_state *dm_crtc_state = NULL;
2941 
2942 	if (adev->dm.disable_hpd_irq)
2943 		return;
2944 
2945 	if (dm_con_state->base.state && dm_con_state->base.crtc)
2946 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2947 					dm_con_state->base.state,
2948 					dm_con_state->base.crtc));
2949 	/*
	 * In case of failure or MST there is no need to update the connector status
	 * or notify the OS, since (for the MST case) MST does this in its own context.
2952 	 */
2953 	mutex_lock(&aconnector->hpd_lock);
2954 
2955 #ifdef CONFIG_DRM_AMD_DC_HDCP
2956 	if (adev->dm.hdcp_workqueue) {
2957 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2958 		dm_con_state->update_hdcp = true;
2959 	}
2960 #endif
2961 	if (aconnector->fake_enable)
2962 		aconnector->fake_enable = false;
2963 
2964 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2965 		DRM_ERROR("KMS: Failed to detect connector\n");
2966 
2967 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2968 		emulated_link_detect(aconnector->dc_link);
2969 
2970 		drm_modeset_lock_all(dev);
2971 		dm_restore_drm_connector_state(dev, connector);
2972 		drm_modeset_unlock_all(dev);
2973 
2974 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2975 			drm_kms_helper_hotplug_event(dev);
2976 
2977 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2978 		if (new_connection_type == dc_connection_none &&
2979 		    aconnector->dc_link->type == dc_connection_none &&
2980 		    dm_crtc_state)
2981 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
2982 
2983 		amdgpu_dm_update_connector_after_detect(aconnector);
2984 
2985 		drm_modeset_lock_all(dev);
2986 		dm_restore_drm_connector_state(dev, connector);
2987 		drm_modeset_unlock_all(dev);
2988 
2989 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2990 			drm_kms_helper_hotplug_event(dev);
2991 	}
2992 	mutex_unlock(&aconnector->hpd_lock);
2993 
2994 }
2995 
2996 static void handle_hpd_irq(void *param)
2997 {
2998 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2999 
3000 	handle_hpd_irq_helper(aconnector);
3001 
3002 }
3003 
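/*
 * Drain pending MST sideband messages: read the ESI/sink-count DPCD
 * registers, hand any down-reply/up-request IRQs to the MST manager, ACK
 * them back to the sink and repeat until no new IRQ is reported or the
 * iteration limit is hit.
 */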
3004 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3005 {
3006 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3007 	uint8_t dret;
3008 	bool new_irq_handled = false;
3009 	int dpcd_addr;
3010 	int dpcd_bytes_to_read;
3011 
3012 	const int max_process_count = 30;
3013 	int process_count = 0;
3014 
3015 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3016 
3017 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3018 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3019 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3020 		dpcd_addr = DP_SINK_COUNT;
3021 	} else {
3022 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3023 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3024 		dpcd_addr = DP_SINK_COUNT_ESI;
3025 	}
3026 
3027 	dret = drm_dp_dpcd_read(
3028 		&aconnector->dm_dp_aux.aux,
3029 		dpcd_addr,
3030 		esi,
3031 		dpcd_bytes_to_read);
3032 
3033 	while (dret == dpcd_bytes_to_read &&
3034 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
3037 
3038 		process_count++;
3039 
3040 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3041 		/* handle HPD short pulse irq */
3042 		if (aconnector->mst_mgr.mst_state)
3043 			drm_dp_mst_hpd_irq(
3044 				&aconnector->mst_mgr,
3045 				esi,
3046 				&new_irq_handled);
3047 
3048 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3050 			const int ack_dpcd_bytes_to_write =
3051 				dpcd_bytes_to_read - 1;
3052 
3053 			for (retry = 0; retry < 3; retry++) {
3054 				uint8_t wret;
3055 
3056 				wret = drm_dp_dpcd_write(
3057 					&aconnector->dm_dp_aux.aux,
3058 					dpcd_addr + 1,
3059 					&esi[1],
3060 					ack_dpcd_bytes_to_write);
3061 				if (wret == ack_dpcd_bytes_to_write)
3062 					break;
3063 			}
3064 
3065 			/* check if there is new irq to be handled */
3066 			dret = drm_dp_dpcd_read(
3067 				&aconnector->dm_dp_aux.aux,
3068 				dpcd_addr,
3069 				esi,
3070 				dpcd_bytes_to_read);
3071 
3072 			new_irq_handled = false;
3073 		} else {
3074 			break;
3075 		}
3076 	}
3077 
3078 	if (process_count == max_process_count)
3079 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3080 }
3081 
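/*
 * Package the HPD RX IRQ data into a work item and queue it on the per-link
 * offload workqueue for deferred handling by dm_handle_hpd_rx_offload_work().
 */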
3082 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3083 							union hpd_irq_data hpd_irq_data)
3084 {
3085 	struct hpd_rx_irq_offload_work *offload_work =
3086 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3087 
3088 	if (!offload_work) {
3089 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3090 		return;
3091 	}
3092 
3093 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3094 	offload_work->data = hpd_irq_data;
3095 	offload_work->offload_wq = offload_wq;
3096 
3097 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3099 }
3100 
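/*
 * Handle a DP short-pulse (HPD RX) interrupt. MST sideband messages are
 * processed inline; automated-test requests and link-loss handling are
 * deferred to the offload workqueue. For non-MST connectors, a downstream
 * port status change triggers re-detection and a hotplug event.
 */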
3101 static void handle_hpd_rx_irq(void *param)
3102 {
3103 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3104 	struct drm_connector *connector = &aconnector->base;
3105 	struct drm_device *dev = connector->dev;
3106 	struct dc_link *dc_link = aconnector->dc_link;
3107 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3108 	bool result = false;
3109 	enum dc_connection_type new_connection_type = dc_connection_none;
3110 	struct amdgpu_device *adev = drm_to_adev(dev);
3111 	union hpd_irq_data hpd_irq_data;
3112 	bool link_loss = false;
3113 	bool has_left_work = false;
3114 	int idx = aconnector->base.index;
3115 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3116 
3117 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3118 
3119 	if (adev->dm.disable_hpd_irq)
3120 		return;
3121 
3122 	/*
	 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
	 * conflicts; after the i2c helper is implemented, this mutex should be
	 * retired.
3126 	 */
3127 	mutex_lock(&aconnector->hpd_lock);
3128 
3129 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3130 						&link_loss, true, &has_left_work);
3131 
3132 	if (!has_left_work)
3133 		goto out;
3134 
3135 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3136 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3137 		goto out;
3138 	}
3139 
3140 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3141 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3142 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3143 			dm_handle_mst_sideband_msg(aconnector);
3144 			goto out;
3145 		}
3146 
3147 		if (link_loss) {
3148 			bool skip = false;
3149 
3150 			spin_lock(&offload_wq->offload_lock);
3151 			skip = offload_wq->is_handling_link_loss;
3152 
3153 			if (!skip)
3154 				offload_wq->is_handling_link_loss = true;
3155 
3156 			spin_unlock(&offload_wq->offload_lock);
3157 
3158 			if (!skip)
3159 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3160 
3161 			goto out;
3162 		}
3163 	}
3164 
3165 out:
3166 	if (result && !is_mst_root_connector) {
3167 		/* Downstream Port status changed. */
3168 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3169 			DRM_ERROR("KMS: Failed to detect connector\n");
3170 
3171 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3172 			emulated_link_detect(dc_link);
3173 
3174 			if (aconnector->fake_enable)
3175 				aconnector->fake_enable = false;
3176 
3177 			amdgpu_dm_update_connector_after_detect(aconnector);
3178 
3179 
3180 			drm_modeset_lock_all(dev);
3181 			dm_restore_drm_connector_state(dev, connector);
3182 			drm_modeset_unlock_all(dev);
3183 
3184 			drm_kms_helper_hotplug_event(dev);
3185 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3186 
3187 			if (aconnector->fake_enable)
3188 				aconnector->fake_enable = false;
3189 
3190 			amdgpu_dm_update_connector_after_detect(aconnector);
3191 
3192 
3193 			drm_modeset_lock_all(dev);
3194 			dm_restore_drm_connector_state(dev, connector);
3195 			drm_modeset_unlock_all(dev);
3196 
3197 			drm_kms_helper_hotplug_event(dev);
3198 		}
3199 	}
3200 #ifdef CONFIG_DRM_AMD_DC_HDCP
3201 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3202 		if (adev->dm.hdcp_workqueue)
3203 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3204 	}
3205 #endif
3206 
3207 	if (dc_link->type != dc_connection_mst_branch)
3208 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3209 
3210 	mutex_unlock(&aconnector->hpd_lock);
3211 }
3212 
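/*
 * Register the HPD and HPD RX (short pulse) interrupt handlers for every
 * connector that has valid IRQ sources, and associate each connector with
 * its HPD RX offload workqueue slot.
 */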
3213 static void register_hpd_handlers(struct amdgpu_device *adev)
3214 {
3215 	struct drm_device *dev = adev_to_drm(adev);
3216 	struct drm_connector *connector;
3217 	struct amdgpu_dm_connector *aconnector;
3218 	const struct dc_link *dc_link;
3219 	struct dc_interrupt_params int_params = {0};
3220 
3221 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3222 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3223 
3224 	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
3226 
3227 		aconnector = to_amdgpu_dm_connector(connector);
3228 		dc_link = aconnector->dc_link;
3229 
3230 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3231 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3232 			int_params.irq_source = dc_link->irq_source_hpd;
3233 
3234 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3235 					handle_hpd_irq,
3236 					(void *) aconnector);
3237 		}
3238 
3239 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3240 
3241 			/* Also register for DP short pulse (hpd_rx). */
3242 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3244 
3245 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3246 					handle_hpd_rx_irq,
3247 					(void *) aconnector);
3248 
3249 			if (adev->dm.hpd_rx_offload_wq)
3250 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3251 					aconnector;
3252 		}
3253 	}
3254 }
3255 
3256 #if defined(CONFIG_DRM_AMD_DC_SI)
3257 /* Register IRQ sources and initialize IRQ callbacks */
3258 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3259 {
3260 	struct dc *dc = adev->dm.dc;
3261 	struct common_irq_params *c_irq_params;
3262 	struct dc_interrupt_params int_params = {0};
3263 	int r;
3264 	int i;
3265 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3266 
3267 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3268 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3269 
3270 	/*
3271 	 * Actions of amdgpu_irq_add_id():
3272 	 * 1. Register a set() function with base driver.
3273 	 *    Base driver will call set() function to enable/disable an
3274 	 *    interrupt in DC hardware.
3275 	 * 2. Register amdgpu_dm_irq_handler().
3276 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3277 	 *    coming from DC hardware.
3278 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3280 
3281 	/* Use VBLANK interrupt */
3282 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3284 		if (r) {
3285 			DRM_ERROR("Failed to add crtc irq id!\n");
3286 			return r;
3287 		}
3288 
3289 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3290 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3292 
3293 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3294 
3295 		c_irq_params->adev = adev;
3296 		c_irq_params->irq_src = int_params.irq_source;
3297 
3298 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3299 				dm_crtc_high_irq, c_irq_params);
3300 	}
3301 
3302 	/* Use GRPH_PFLIP interrupt */
3303 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3304 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3305 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3306 		if (r) {
3307 			DRM_ERROR("Failed to add page flip irq id!\n");
3308 			return r;
3309 		}
3310 
3311 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3312 		int_params.irq_source =
3313 			dc_interrupt_to_irq_source(dc, i, 0);
3314 
3315 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3316 
3317 		c_irq_params->adev = adev;
3318 		c_irq_params->irq_src = int_params.irq_source;
3319 
3320 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3321 				dm_pflip_high_irq, c_irq_params);
3322 
3323 	}
3324 
3325 	/* HPD */
3326 	r = amdgpu_irq_add_id(adev, client_id,
3327 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3328 	if (r) {
3329 		DRM_ERROR("Failed to add hpd irq id!\n");
3330 		return r;
3331 	}
3332 
3333 	register_hpd_handlers(adev);
3334 
3335 	return 0;
3336 }
3337 #endif
3338 
3339 /* Register IRQ sources and initialize IRQ callbacks */
3340 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3341 {
3342 	struct dc *dc = adev->dm.dc;
3343 	struct common_irq_params *c_irq_params;
3344 	struct dc_interrupt_params int_params = {0};
3345 	int r;
3346 	int i;
3347 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3348 
3349 	if (adev->family >= AMDGPU_FAMILY_AI)
3350 		client_id = SOC15_IH_CLIENTID_DCE;
3351 
3352 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3353 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3354 
3355 	/*
3356 	 * Actions of amdgpu_irq_add_id():
3357 	 * 1. Register a set() function with base driver.
3358 	 *    Base driver will call set() function to enable/disable an
3359 	 *    interrupt in DC hardware.
3360 	 * 2. Register amdgpu_dm_irq_handler().
3361 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3362 	 *    coming from DC hardware.
3363 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3365 
3366 	/* Use VBLANK interrupt */
3367 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3368 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3369 		if (r) {
3370 			DRM_ERROR("Failed to add crtc irq id!\n");
3371 			return r;
3372 		}
3373 
3374 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3375 		int_params.irq_source =
3376 			dc_interrupt_to_irq_source(dc, i, 0);
3377 
3378 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3379 
3380 		c_irq_params->adev = adev;
3381 		c_irq_params->irq_src = int_params.irq_source;
3382 
3383 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3384 				dm_crtc_high_irq, c_irq_params);
3385 	}
3386 
3387 	/* Use VUPDATE interrupt */
3388 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3389 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3390 		if (r) {
3391 			DRM_ERROR("Failed to add vupdate irq id!\n");
3392 			return r;
3393 		}
3394 
3395 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3396 		int_params.irq_source =
3397 			dc_interrupt_to_irq_source(dc, i, 0);
3398 
3399 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3400 
3401 		c_irq_params->adev = adev;
3402 		c_irq_params->irq_src = int_params.irq_source;
3403 
3404 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3405 				dm_vupdate_high_irq, c_irq_params);
3406 	}
3407 
3408 	/* Use GRPH_PFLIP interrupt */
3409 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3410 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3411 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3412 		if (r) {
3413 			DRM_ERROR("Failed to add page flip irq id!\n");
3414 			return r;
3415 		}
3416 
3417 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3418 		int_params.irq_source =
3419 			dc_interrupt_to_irq_source(dc, i, 0);
3420 
3421 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3422 
3423 		c_irq_params->adev = adev;
3424 		c_irq_params->irq_src = int_params.irq_source;
3425 
3426 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3427 				dm_pflip_high_irq, c_irq_params);
3428 
3429 	}
3430 
3431 	/* HPD */
3432 	r = amdgpu_irq_add_id(adev, client_id,
3433 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3434 	if (r) {
3435 		DRM_ERROR("Failed to add hpd irq id!\n");
3436 		return r;
3437 	}
3438 
3439 	register_hpd_handlers(adev);
3440 
3441 	return 0;
3442 }
3443 
3444 #if defined(CONFIG_DRM_AMD_DC_DCN)
3445 /* Register IRQ sources and initialize IRQ callbacks */
3446 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3447 {
3448 	struct dc *dc = adev->dm.dc;
3449 	struct common_irq_params *c_irq_params;
3450 	struct dc_interrupt_params int_params = {0};
3451 	int r;
3452 	int i;
3453 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3454 	static const unsigned int vrtl_int_srcid[] = {
3455 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3456 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3457 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3458 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3459 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3460 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3461 	};
3462 #endif
3463 
3464 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3465 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3466 
3467 	/*
3468 	 * Actions of amdgpu_irq_add_id():
3469 	 * 1. Register a set() function with base driver.
3470 	 *    Base driver will call set() function to enable/disable an
3471 	 *    interrupt in DC hardware.
3472 	 * 2. Register amdgpu_dm_irq_handler().
3473 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3474 	 *    coming from DC hardware.
3475 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3476 	 *    for acknowledging and handling.
3477 	 */
3478 
3479 	/* Use VSTARTUP interrupt */
3480 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3481 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3482 			i++) {
3483 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3484 
3485 		if (r) {
3486 			DRM_ERROR("Failed to add crtc irq id!\n");
3487 			return r;
3488 		}
3489 
3490 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3491 		int_params.irq_source =
3492 			dc_interrupt_to_irq_source(dc, i, 0);
3493 
3494 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3495 
3496 		c_irq_params->adev = adev;
3497 		c_irq_params->irq_src = int_params.irq_source;
3498 
3499 		amdgpu_dm_irq_register_interrupt(
3500 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3501 	}
3502 
3503 	/* Use otg vertical line interrupt */
3504 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3505 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3506 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3507 				vrtl_int_srcid[i], &adev->vline0_irq);
3508 
3509 		if (r) {
3510 			DRM_ERROR("Failed to add vline0 irq id!\n");
3511 			return r;
3512 		}
3513 
3514 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3515 		int_params.irq_source =
3516 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3517 
3518 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3519 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3520 			break;
3521 		}
3522 
3523 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3524 					- DC_IRQ_SOURCE_DC1_VLINE0];
3525 
3526 		c_irq_params->adev = adev;
3527 		c_irq_params->irq_src = int_params.irq_source;
3528 
3529 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3530 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3531 	}
3532 #endif
3533 
3534 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3535 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3536 	 * to trigger at end of each vblank, regardless of state of the lock,
3537 	 * matching DCE behaviour.
3538 	 */
3539 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3540 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3541 	     i++) {
3542 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3543 
3544 		if (r) {
3545 			DRM_ERROR("Failed to add vupdate irq id!\n");
3546 			return r;
3547 		}
3548 
3549 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3550 		int_params.irq_source =
3551 			dc_interrupt_to_irq_source(dc, i, 0);
3552 
3553 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3554 
3555 		c_irq_params->adev = adev;
3556 		c_irq_params->irq_src = int_params.irq_source;
3557 
3558 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3559 				dm_vupdate_high_irq, c_irq_params);
3560 	}
3561 
3562 	/* Use GRPH_PFLIP interrupt */
3563 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3564 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3565 			i++) {
3566 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3567 		if (r) {
3568 			DRM_ERROR("Failed to add page flip irq id!\n");
3569 			return r;
3570 		}
3571 
3572 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3573 		int_params.irq_source =
3574 			dc_interrupt_to_irq_source(dc, i, 0);
3575 
3576 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3577 
3578 		c_irq_params->adev = adev;
3579 		c_irq_params->irq_src = int_params.irq_source;
3580 
3581 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3582 				dm_pflip_high_irq, c_irq_params);
3583 
3584 	}
3585 
3586 	/* HPD */
3587 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3588 			&adev->hpd_irq);
3589 	if (r) {
3590 		DRM_ERROR("Failed to add hpd irq id!\n");
3591 		return r;
3592 	}
3593 
3594 	register_hpd_handlers(adev);
3595 
3596 	return 0;
3597 }
3598 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3599 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3600 {
3601 	struct dc *dc = adev->dm.dc;
3602 	struct common_irq_params *c_irq_params;
3603 	struct dc_interrupt_params int_params = {0};
3604 	int r, i;
3605 
3606 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3607 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3608 
3609 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3610 			&adev->dmub_outbox_irq);
3611 	if (r) {
3612 		DRM_ERROR("Failed to add outbox irq id!\n");
3613 		return r;
3614 	}
3615 
3616 	if (dc->ctx->dmub_srv) {
3617 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3618 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3619 		int_params.irq_source =
3620 		dc_interrupt_to_irq_source(dc, i, 0);
3621 
3622 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3623 
3624 		c_irq_params->adev = adev;
3625 		c_irq_params->irq_src = int_params.irq_source;
3626 
3627 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3628 				dm_dmub_outbox1_low_irq, c_irq_params);
3629 	}
3630 
3631 	return 0;
3632 }
3633 #endif
3634 
3635 /*
3636  * Acquires the lock for the atomic state object and returns
3637  * the new atomic state.
3638  *
3639  * This should only be called during atomic check.
3640  */
3641 static int dm_atomic_get_state(struct drm_atomic_state *state,
3642 			       struct dm_atomic_state **dm_state)
3643 {
3644 	struct drm_device *dev = state->dev;
3645 	struct amdgpu_device *adev = drm_to_adev(dev);
3646 	struct amdgpu_display_manager *dm = &adev->dm;
3647 	struct drm_private_state *priv_state;
3648 
3649 	if (*dm_state)
3650 		return 0;
3651 
3652 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3653 	if (IS_ERR(priv_state))
3654 		return PTR_ERR(priv_state);
3655 
3656 	*dm_state = to_dm_atomic_state(priv_state);
3657 
3658 	return 0;
3659 }
3660 
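/*
 * Returns the dm private state already added to this atomic state, or
 * NULL if dm_atomic_get_state() has not been called for this commit.
 */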
3661 static struct dm_atomic_state *
3662 dm_atomic_get_new_state(struct drm_atomic_state *state)
3663 {
3664 	struct drm_device *dev = state->dev;
3665 	struct amdgpu_device *adev = drm_to_adev(dev);
3666 	struct amdgpu_display_manager *dm = &adev->dm;
3667 	struct drm_private_obj *obj;
3668 	struct drm_private_state *new_obj_state;
3669 	int i;
3670 
3671 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3672 		if (obj->funcs == dm->atomic_obj.funcs)
3673 			return to_dm_atomic_state(new_obj_state);
3674 	}
3675 
3676 	return NULL;
3677 }
3678 
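/*
 * Private object duplicate hook: clones the dm state and deep-copies the
 * current DC state so atomic check can modify it independently.
 */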
3679 static struct drm_private_state *
3680 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3681 {
3682 	struct dm_atomic_state *old_state, *new_state;
3683 
3684 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3685 	if (!new_state)
3686 		return NULL;
3687 
3688 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3689 
3690 	old_state = to_dm_atomic_state(obj->state);
3691 
3692 	if (old_state && old_state->context)
3693 		new_state->context = dc_copy_state(old_state->context);
3694 
3695 	if (!new_state->context) {
3696 		kfree(new_state);
3697 		return NULL;
3698 	}
3699 
3700 	return &new_state->base;
3701 }
3702 
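/* Private object destroy hook: releases the DC state and frees the wrapper. */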
3703 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3704 				    struct drm_private_state *state)
3705 {
3706 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3707 
3708 	if (dm_state && dm_state->context)
3709 		dc_release_state(dm_state->context);
3710 
3711 	kfree(dm_state);
3712 }
3713 
3714 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3715 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3716 	.atomic_destroy_state = dm_atomic_destroy_state,
3717 };
3718 
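/*
 * Sets up the DRM mode config limits and helpers, creates the dm private
 * atomic object seeded with a copy of the current DC state, and initializes
 * modeset properties and audio support.
 */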
3719 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3720 {
3721 	struct dm_atomic_state *state;
3722 	int r;
3723 
3724 	adev->mode_info.mode_config_initialized = true;
3725 
3726 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3727 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3728 
3729 	adev_to_drm(adev)->mode_config.max_width = 16384;
3730 	adev_to_drm(adev)->mode_config.max_height = 16384;
3731 
3732 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3733 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3734 	/* indicates support for immediate flip */
3735 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3736 
3737 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3738 
3739 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3740 	if (!state)
3741 		return -ENOMEM;
3742 
3743 	state->context = dc_create_state(adev->dm.dc);
3744 	if (!state->context) {
3745 		kfree(state);
3746 		return -ENOMEM;
3747 	}
3748 
3749 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3750 
3751 	drm_atomic_private_obj_init(adev_to_drm(adev),
3752 				    &adev->dm.atomic_obj,
3753 				    &state->base,
3754 				    &dm_atomic_state_funcs);
3755 
3756 	r = amdgpu_display_modeset_create_props(adev);
3757 	if (r) {
3758 		dc_release_state(state->context);
3759 		kfree(state);
3760 		return r;
3761 	}
3762 
3763 	r = amdgpu_dm_audio_init(adev);
3764 	if (r) {
3765 		dc_release_state(state->context);
3766 		kfree(state);
3767 		return r;
3768 	}
3769 
3770 	return 0;
3771 }
3772 
3773 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3774 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3775 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3776 
3777 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3778 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3779 
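/*
 * Caches the backlight capability range for the given eDP index, using ACPI
 * data when available and falling back to the driver defaults otherwise.
 */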
3780 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3781 					    int bl_idx)
3782 {
3783 #if defined(CONFIG_ACPI)
3784 	struct amdgpu_dm_backlight_caps caps;
3785 
3786 	memset(&caps, 0, sizeof(caps));
3787 
3788 	if (dm->backlight_caps[bl_idx].caps_valid)
3789 		return;
3790 
3791 	amdgpu_acpi_get_backlight_caps(&caps);
3792 	if (caps.caps_valid) {
3793 		dm->backlight_caps[bl_idx].caps_valid = true;
3794 		if (caps.aux_support)
3795 			return;
3796 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3797 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3798 	} else {
3799 		dm->backlight_caps[bl_idx].min_input_signal =
3800 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3801 		dm->backlight_caps[bl_idx].max_input_signal =
3802 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3803 	}
3804 #else
3805 	if (dm->backlight_caps[bl_idx].aux_support)
3806 		return;
3807 
3808 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3809 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3810 #endif
3811 }
3812 
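/*
 * Fills min/max with the brightness range implied by the caps (millinits
 * for AUX control, 16-bit PWM units otherwise); returns 0 if no caps were
 * provided.
 */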
3813 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3814 				unsigned *min, unsigned *max)
3815 {
3816 	if (!caps)
3817 		return 0;
3818 
3819 	if (caps->aux_support) {
3820 		// Firmware limits are in nits, DC API wants millinits.
3821 		*max = 1000 * caps->aux_max_input_signal;
3822 		*min = 1000 * caps->aux_min_input_signal;
3823 	} else {
3824 		// Firmware limits are 8-bit, PWM control is 16-bit.
3825 		*max = 0x101 * caps->max_input_signal;
3826 		*min = 0x101 * caps->min_input_signal;
3827 	}
3828 	return 1;
3829 }
3830 
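/* Rescales a 0..AMDGPU_MAX_BL_LEVEL user value onto the panel's min..max range. */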
3831 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3832 					uint32_t brightness)
3833 {
3834 	unsigned min, max;
3835 
3836 	if (!get_brightness_range(caps, &min, &max))
3837 		return brightness;
3838 
3839 	// Rescale 0..255 to min..max
3840 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3841 				       AMDGPU_MAX_BL_LEVEL);
3842 }
3843 
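/* Rescales a panel min..max value back onto the 0..AMDGPU_MAX_BL_LEVEL scale. */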
3844 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3845 				      uint32_t brightness)
3846 {
3847 	unsigned min, max;
3848 
3849 	if (!get_brightness_range(caps, &min, &max))
3850 		return brightness;
3851 
3852 	if (brightness < min)
3853 		return 0;
3854 	// Rescale min..max to 0..255
3855 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3856 				 max - min);
3857 }
3858 
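/*
 * Applies the requested user brightness to the given eDP link, via the AUX
 * (nits) interface when the panel supports it and via PWM otherwise.
 */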
3859 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3860 					 int bl_idx,
3861 					 u32 user_brightness)
3862 {
3863 	struct amdgpu_dm_backlight_caps caps;
3864 	struct dc_link *link;
3865 	u32 brightness;
3866 	bool rc;
3867 
3868 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3869 	caps = dm->backlight_caps[bl_idx];
3870 
3871 	dm->brightness[bl_idx] = user_brightness;
3872 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3873 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3874 
3875 	/* Change brightness based on AUX property */
3876 	if (caps.aux_support) {
3877 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3878 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3879 		if (!rc)
3880 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3881 	} else {
3882 		rc = dc_link_set_backlight_level(link, brightness, 0);
3883 		if (!rc)
3884 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3885 	}
3886 
3887 	return rc ? 0 : 1;
3888 }
3889 
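/*
 * Backlight update_status callback: maps the device to its eDP index and
 * applies the requested brightness.
 */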
3890 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3891 {
3892 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3893 	int i;
3894 
3895 	for (i = 0; i < dm->num_of_edps; i++) {
3896 		if (bd == dm->backlight_dev[i])
3897 			break;
3898 	}
3899 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3900 		i = 0;
3901 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3902 
3903 	return 0;
3904 }
3905 
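/*
 * Reads the current brightness back from the link (average nits over AUX,
 * or the PWM level) and converts it to the user scale; falls back to the
 * cached value on error.
 */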
3906 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3907 					 int bl_idx)
3908 {
3909 	struct amdgpu_dm_backlight_caps caps;
3910 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3911 
3912 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3913 	caps = dm->backlight_caps[bl_idx];
3914 
3915 	if (caps.aux_support) {
3916 		u32 avg, peak;
3917 		bool rc;
3918 
3919 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3920 		if (!rc)
3921 			return dm->brightness[bl_idx];
3922 		return convert_brightness_to_user(&caps, avg);
3923 	} else {
3924 		int ret = dc_link_get_backlight_level(link);
3925 
3926 		if (ret == DC_ERROR_UNEXPECTED)
3927 			return dm->brightness[bl_idx];
3928 		return convert_brightness_to_user(&caps, ret);
3929 	}
3930 }
3931 
3932 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3933 {
3934 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3935 	int i;
3936 
3937 	for (i = 0; i < dm->num_of_edps; i++) {
3938 		if (bd == dm->backlight_dev[i])
3939 			break;
3940 	}
3941 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3942 		i = 0;
3943 	return amdgpu_dm_backlight_get_level(dm, i);
3944 }
3945 
3946 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3947 	.options = BL_CORE_SUSPENDRESUME,
3948 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3949 	.update_status	= amdgpu_dm_backlight_update_status,
3950 };
3951 
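/* Registers a backlight class device for the next unused eDP slot. */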
3952 static void
3953 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3954 {
3955 	char bl_name[16];
3956 	struct backlight_properties props = { 0 };
3957 
3958 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3959 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3960 
3961 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3962 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3963 	props.type = BACKLIGHT_RAW;
3964 
3965 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3966 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3967 
3968 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3969 								       adev_to_drm(dm->adev)->dev,
3970 								       dm,
3971 								       &amdgpu_dm_backlight_ops,
3972 								       &props);
3973 
3974 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3975 		DRM_ERROR("DM: Backlight registration failed!\n");
3976 	else
3977 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3978 }
3979 #endif
3980 
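/*
 * Allocates and initializes a DRM plane of the given type. Primary planes
 * are tied to their matching CRTC; overlay/underlay planes may be placed on
 * any CRTC.
 */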
3981 static int initialize_plane(struct amdgpu_display_manager *dm,
3982 			    struct amdgpu_mode_info *mode_info, int plane_id,
3983 			    enum drm_plane_type plane_type,
3984 			    const struct dc_plane_cap *plane_cap)
3985 {
3986 	struct drm_plane *plane;
3987 	unsigned long possible_crtcs;
3988 	int ret = 0;
3989 
3990 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3991 	if (!plane) {
3992 		DRM_ERROR("KMS: Failed to allocate plane\n");
3993 		return -ENOMEM;
3994 	}
3995 	plane->type = plane_type;
3996 
3997 	/*
3998 	 * HACK: IGT tests expect that the primary plane for a CRTC
3999 	 * can only have one possible CRTC. Only expose support for
4000 	 * any CRTC if the plane is not going to be used as a primary
4001 	 * plane for a CRTC - i.e. for overlay or underlay planes.
4002 	 */
4003 	possible_crtcs = 1 << plane_id;
4004 	if (plane_id >= dm->dc->caps.max_streams)
4005 		possible_crtcs = 0xff;
4006 
4007 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4008 
4009 	if (ret) {
4010 		DRM_ERROR("KMS: Failed to initialize plane\n");
4011 		kfree(plane);
4012 		return ret;
4013 	}
4014 
4015 	if (mode_info)
4016 		mode_info->planes[plane_id] = plane;
4017 
4018 	return ret;
4019 }
4020 
4021 
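/* Binds a connected eDP/LVDS link to a backlight device, registering one if needed. */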
4022 static void register_backlight_device(struct amdgpu_display_manager *dm,
4023 				      struct dc_link *link)
4024 {
4025 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4026 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4027 
4028 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4029 	    link->type != dc_connection_none) {
4030 		/*
4031 		 * Even if registration fails, we should continue with
4032 		 * DM initialization because not having backlight control
4033 		 * is better than a black screen.
4034 		 */
4035 		if (!dm->backlight_dev[dm->num_of_edps])
4036 			amdgpu_dm_register_backlight_device(dm);
4037 
4038 		if (dm->backlight_dev[dm->num_of_edps]) {
4039 			dm->backlight_link[dm->num_of_edps] = link;
4040 			dm->num_of_edps++;
4041 		}
4042 	}
4043 #endif
4044 }
4045 
4046 
4047 /*
4048  * In this architecture, the association
4049  * connector -> encoder -> crtc
4050  * is not really required. The crtc and connector will hold the
4051  * display_index as an abstraction to use with the DAL component.
4052  *
4053  * Returns 0 on success
4054  */
4055 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4056 {
4057 	struct amdgpu_display_manager *dm = &adev->dm;
4058 	int32_t i;
4059 	struct amdgpu_dm_connector *aconnector = NULL;
4060 	struct amdgpu_encoder *aencoder = NULL;
4061 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4062 	uint32_t link_cnt;
4063 	int32_t primary_planes;
4064 	enum dc_connection_type new_connection_type = dc_connection_none;
4065 	const struct dc_plane_cap *plane;
4066 	bool psr_feature_enabled = false;
4067 
4068 	dm->display_indexes_num = dm->dc->caps.max_streams;
4069 	/* Update the actual number of CRTCs in use */
4070 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4071 
4072 	link_cnt = dm->dc->caps.max_links;
4073 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4074 		DRM_ERROR("DM: Failed to initialize mode config\n");
4075 		return -EINVAL;
4076 	}
4077 
4078 	/* There is one primary plane per CRTC */
4079 	primary_planes = dm->dc->caps.max_streams;
4080 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4081 
4082 	/*
4083 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4084 	 * Order is reversed to match iteration order in atomic check.
4085 	 */
4086 	for (i = (primary_planes - 1); i >= 0; i--) {
4087 		plane = &dm->dc->caps.planes[i];
4088 
4089 		if (initialize_plane(dm, mode_info, i,
4090 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4091 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4092 			goto fail;
4093 		}
4094 	}
4095 
4096 	/*
4097 	 * Initialize overlay planes, index starting after primary planes.
4098 	 * These planes have a higher DRM index than the primary planes since
4099 	 * they should be considered as having a higher z-order.
4100 	 * Order is reversed to match iteration order in atomic check.
4101 	 *
4102 	 * Only support DCN for now, and only expose one so we don't encourage
4103 	 * userspace to use up all the pipes.
4104 	 */
4105 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4106 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4107 
4108 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4109 			continue;
4110 
4111 		if (!plane->blends_with_above || !plane->blends_with_below)
4112 			continue;
4113 
4114 		if (!plane->pixel_format_support.argb8888)
4115 			continue;
4116 
4117 		if (initialize_plane(dm, NULL, primary_planes + i,
4118 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4119 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4120 			goto fail;
4121 		}
4122 
4123 		/* Only create one overlay plane. */
4124 		break;
4125 	}
4126 
4127 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4128 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4129 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4130 			goto fail;
4131 		}
4132 
4133 #if defined(CONFIG_DRM_AMD_DC_DCN)
4134 	/* Use Outbox interrupt */
4135 	switch (adev->ip_versions[DCE_HWIP][0]) {
4136 	case IP_VERSION(3, 0, 0):
4137 	case IP_VERSION(3, 1, 2):
4138 	case IP_VERSION(3, 1, 3):
4139 	case IP_VERSION(2, 1, 0):
4140 		if (register_outbox_irq_handlers(dm->adev)) {
4141 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4142 			goto fail;
4143 		}
4144 		break;
4145 	default:
4146 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4147 			      adev->ip_versions[DCE_HWIP][0]);
4148 	}
4149 
4150 	/* Determine whether to enable PSR support by default. */
4151 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4152 		switch (adev->ip_versions[DCE_HWIP][0]) {
4153 		case IP_VERSION(3, 1, 2):
4154 		case IP_VERSION(3, 1, 3):
4155 			psr_feature_enabled = true;
4156 			break;
4157 		default:
4158 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4159 			break;
4160 		}
4161 	}
4162 #endif
4163 
4164 	/* loops over all connectors on the board */
4165 	for (i = 0; i < link_cnt; i++) {
4166 		struct dc_link *link = NULL;
4167 
4168 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4169 			DRM_ERROR(
4170 				"KMS: Cannot support more than %d display indexes\n",
4171 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4172 			continue;
4173 		}
4174 
4175 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4176 		if (!aconnector)
4177 			goto fail;
4178 
4179 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4180 		if (!aencoder)
4181 			goto fail;
4182 
4183 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4184 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4185 			goto fail;
4186 		}
4187 
4188 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4189 			DRM_ERROR("KMS: Failed to initialize connector\n");
4190 			goto fail;
4191 		}
4192 
4193 		link = dc_get_link_at_index(dm->dc, i);
4194 
4195 		if (!dc_link_detect_sink(link, &new_connection_type))
4196 			DRM_ERROR("KMS: Failed to detect connector\n");
4197 
4198 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4199 			emulated_link_detect(link);
4200 			amdgpu_dm_update_connector_after_detect(aconnector);
4201 
4202 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4203 			amdgpu_dm_update_connector_after_detect(aconnector);
4204 			register_backlight_device(dm, link);
4205 
4206 			if (psr_feature_enabled)
4207 				amdgpu_dm_set_psr_caps(link);
4208 		}
4211 	}
4212 
4213 	/* Software is initialized. Now we can register interrupt handlers. */
4214 	switch (adev->asic_type) {
4215 #if defined(CONFIG_DRM_AMD_DC_SI)
4216 	case CHIP_TAHITI:
4217 	case CHIP_PITCAIRN:
4218 	case CHIP_VERDE:
4219 	case CHIP_OLAND:
4220 		if (dce60_register_irq_handlers(dm->adev)) {
4221 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4222 			goto fail;
4223 		}
4224 		break;
4225 #endif
4226 	case CHIP_BONAIRE:
4227 	case CHIP_HAWAII:
4228 	case CHIP_KAVERI:
4229 	case CHIP_KABINI:
4230 	case CHIP_MULLINS:
4231 	case CHIP_TONGA:
4232 	case CHIP_FIJI:
4233 	case CHIP_CARRIZO:
4234 	case CHIP_STONEY:
4235 	case CHIP_POLARIS11:
4236 	case CHIP_POLARIS10:
4237 	case CHIP_POLARIS12:
4238 	case CHIP_VEGAM:
4239 	case CHIP_VEGA10:
4240 	case CHIP_VEGA12:
4241 	case CHIP_VEGA20:
4242 		if (dce110_register_irq_handlers(dm->adev)) {
4243 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4244 			goto fail;
4245 		}
4246 		break;
4247 	default:
4248 #if defined(CONFIG_DRM_AMD_DC_DCN)
4249 		switch (adev->ip_versions[DCE_HWIP][0]) {
4250 		case IP_VERSION(1, 0, 0):
4251 		case IP_VERSION(1, 0, 1):
4252 		case IP_VERSION(2, 0, 2):
4253 		case IP_VERSION(2, 0, 3):
4254 		case IP_VERSION(2, 0, 0):
4255 		case IP_VERSION(2, 1, 0):
4256 		case IP_VERSION(3, 0, 0):
4257 		case IP_VERSION(3, 0, 2):
4258 		case IP_VERSION(3, 0, 3):
4259 		case IP_VERSION(3, 0, 1):
4260 		case IP_VERSION(3, 1, 2):
4261 		case IP_VERSION(3, 1, 3):
4262 			if (dcn10_register_irq_handlers(dm->adev)) {
4263 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4264 				goto fail;
4265 			}
4266 			break;
4267 		default:
4268 			DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4269 					adev->ip_versions[DCE_HWIP][0]);
4270 			goto fail;
4271 		}
4272 #endif
4273 		break;
4274 	}
4275 
4276 	return 0;
4277 fail:
4278 	kfree(aencoder);
4279 	kfree(aconnector);
4280 
4281 	return -EINVAL;
4282 }
4283 
4284 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4285 {
4286 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4288 }
4289 
4290 /******************************************************************************
4291  * amdgpu_display_funcs functions
4292  *****************************************************************************/
4293 
4294 /*
4295  * dm_bandwidth_update - program display watermarks
4296  *
4297  * @adev: amdgpu_device pointer
4298  *
4299  * Calculate and program the display watermarks and line buffer allocation.
4300  */
4301 static void dm_bandwidth_update(struct amdgpu_device *adev)
4302 {
4303 	/* TODO: implement later */
4304 }
4305 
4306 static const struct amdgpu_display_funcs dm_display_funcs = {
4307 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4308 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4309 	.backlight_set_level = NULL, /* never called for DC */
4310 	.backlight_get_level = NULL, /* never called for DC */
4311 	.hpd_sense = NULL,/* called unconditionally */
4312 	.hpd_set_polarity = NULL, /* called unconditionally */
4313 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4314 	.page_flip_get_scanoutpos =
4315 		dm_crtc_get_scanoutpos,/* called unconditionally */
4316 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4317 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4318 };
4319 
4320 #if defined(CONFIG_DEBUG_KERNEL_DC)
4321 
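/*
 * Debug hook: writing a non-zero value simulates resume (dm_resume plus a
 * hotplug event), writing zero simulates suspend.
 */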
4322 static ssize_t s3_debug_store(struct device *device,
4323 			      struct device_attribute *attr,
4324 			      const char *buf,
4325 			      size_t count)
4326 {
4327 	int ret;
4328 	int s3_state;
4329 	struct drm_device *drm_dev = dev_get_drvdata(device);
4330 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4331 
4332 	ret = kstrtoint(buf, 0, &s3_state);
4333 
4334 	if (ret == 0) {
4335 		if (s3_state) {
4336 			dm_resume(adev);
4337 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4338 		} else
4339 			dm_suspend(adev);
4340 	}
4341 
4342 	return ret == 0 ? count : 0;
4343 }
4344 
4345 DEVICE_ATTR_WO(s3_debug);
4346 
4347 #endif
4348 
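/*
 * Early init: sets the per-ASIC CRTC/HPD/DIG counts and installs the DM
 * display function table and IRQ callbacks.
 */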
4349 static int dm_early_init(void *handle)
4350 {
4351 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4352 
4353 	switch (adev->asic_type) {
4354 #if defined(CONFIG_DRM_AMD_DC_SI)
4355 	case CHIP_TAHITI:
4356 	case CHIP_PITCAIRN:
4357 	case CHIP_VERDE:
4358 		adev->mode_info.num_crtc = 6;
4359 		adev->mode_info.num_hpd = 6;
4360 		adev->mode_info.num_dig = 6;
4361 		break;
4362 	case CHIP_OLAND:
4363 		adev->mode_info.num_crtc = 2;
4364 		adev->mode_info.num_hpd = 2;
4365 		adev->mode_info.num_dig = 2;
4366 		break;
4367 #endif
4368 	case CHIP_BONAIRE:
4369 	case CHIP_HAWAII:
4370 		adev->mode_info.num_crtc = 6;
4371 		adev->mode_info.num_hpd = 6;
4372 		adev->mode_info.num_dig = 6;
4373 		break;
4374 	case CHIP_KAVERI:
4375 		adev->mode_info.num_crtc = 4;
4376 		adev->mode_info.num_hpd = 6;
4377 		adev->mode_info.num_dig = 7;
4378 		break;
4379 	case CHIP_KABINI:
4380 	case CHIP_MULLINS:
4381 		adev->mode_info.num_crtc = 2;
4382 		adev->mode_info.num_hpd = 6;
4383 		adev->mode_info.num_dig = 6;
4384 		break;
4385 	case CHIP_FIJI:
4386 	case CHIP_TONGA:
4387 		adev->mode_info.num_crtc = 6;
4388 		adev->mode_info.num_hpd = 6;
4389 		adev->mode_info.num_dig = 7;
4390 		break;
4391 	case CHIP_CARRIZO:
4392 		adev->mode_info.num_crtc = 3;
4393 		adev->mode_info.num_hpd = 6;
4394 		adev->mode_info.num_dig = 9;
4395 		break;
4396 	case CHIP_STONEY:
4397 		adev->mode_info.num_crtc = 2;
4398 		adev->mode_info.num_hpd = 6;
4399 		adev->mode_info.num_dig = 9;
4400 		break;
4401 	case CHIP_POLARIS11:
4402 	case CHIP_POLARIS12:
4403 		adev->mode_info.num_crtc = 5;
4404 		adev->mode_info.num_hpd = 5;
4405 		adev->mode_info.num_dig = 5;
4406 		break;
4407 	case CHIP_POLARIS10:
4408 	case CHIP_VEGAM:
4409 		adev->mode_info.num_crtc = 6;
4410 		adev->mode_info.num_hpd = 6;
4411 		adev->mode_info.num_dig = 6;
4412 		break;
4413 	case CHIP_VEGA10:
4414 	case CHIP_VEGA12:
4415 	case CHIP_VEGA20:
4416 		adev->mode_info.num_crtc = 6;
4417 		adev->mode_info.num_hpd = 6;
4418 		adev->mode_info.num_dig = 6;
4419 		break;
4420 	default:
4421 #if defined(CONFIG_DRM_AMD_DC_DCN)
4422 		switch (adev->ip_versions[DCE_HWIP][0]) {
4423 		case IP_VERSION(2, 0, 2):
4424 		case IP_VERSION(3, 0, 0):
4425 			adev->mode_info.num_crtc = 6;
4426 			adev->mode_info.num_hpd = 6;
4427 			adev->mode_info.num_dig = 6;
4428 			break;
4429 		case IP_VERSION(2, 0, 0):
4430 		case IP_VERSION(3, 0, 2):
4431 			adev->mode_info.num_crtc = 5;
4432 			adev->mode_info.num_hpd = 5;
4433 			adev->mode_info.num_dig = 5;
4434 			break;
4435 		case IP_VERSION(2, 0, 3):
4436 		case IP_VERSION(3, 0, 3):
4437 			adev->mode_info.num_crtc = 2;
4438 			adev->mode_info.num_hpd = 2;
4439 			adev->mode_info.num_dig = 2;
4440 			break;
4441 		case IP_VERSION(1, 0, 0):
4442 		case IP_VERSION(1, 0, 1):
4443 		case IP_VERSION(3, 0, 1):
4444 		case IP_VERSION(2, 1, 0):
4445 		case IP_VERSION(3, 1, 2):
4446 		case IP_VERSION(3, 1, 3):
4447 			adev->mode_info.num_crtc = 4;
4448 			adev->mode_info.num_hpd = 4;
4449 			adev->mode_info.num_dig = 4;
4450 			break;
4451 		default:
4452 			DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4453 					adev->ip_versions[DCE_HWIP][0]);
4454 			return -EINVAL;
4455 		}
4456 #endif
4457 		break;
4458 	}
4459 
4460 	amdgpu_dm_set_irq_funcs(adev);
4461 
4462 	if (adev->mode_info.funcs == NULL)
4463 		adev->mode_info.funcs = &dm_display_funcs;
4464 
4465 	/*
4466 	 * Note: Do NOT change adev->audio_endpt_rreg and
4467 	 * adev->audio_endpt_wreg because they are initialised in
4468 	 * amdgpu_device_init()
4469 	 */
4470 #if defined(CONFIG_DEBUG_KERNEL_DC)
4471 	device_create_file(
4472 		adev_to_drm(adev)->dev,
4473 		&dev_attr_s3_debug);
4474 #endif
4475 
4476 	return 0;
4477 }
4478 
4479 static bool modeset_required(struct drm_crtc_state *crtc_state,
4480 			     struct dc_stream_state *new_stream,
4481 			     struct dc_stream_state *old_stream)
4482 {
4483 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4484 }
4485 
4486 static bool modereset_required(struct drm_crtc_state *crtc_state)
4487 {
4488 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4489 }
4490 
4491 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4492 {
4493 	drm_encoder_cleanup(encoder);
4494 	kfree(encoder);
4495 }
4496 
4497 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4498 	.destroy = amdgpu_dm_encoder_destroy,
4499 };
4500 
4501 
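/*
 * Looks up the DC per-format scaling limits (in 1/1000 units) for the
 * framebuffer's pixel format; a cap of 1 means "no scaling" and is mapped
 * to 1000 (1.0).
 */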
4502 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4503 					 struct drm_framebuffer *fb,
4504 					 int *min_downscale, int *max_upscale)
4505 {
4506 	struct amdgpu_device *adev = drm_to_adev(dev);
4507 	struct dc *dc = adev->dm.dc;
4508 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4509 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4510 
4511 	switch (fb->format->format) {
4512 	case DRM_FORMAT_P010:
4513 	case DRM_FORMAT_NV12:
4514 	case DRM_FORMAT_NV21:
4515 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4516 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4517 		break;
4518 
4519 	case DRM_FORMAT_XRGB16161616F:
4520 	case DRM_FORMAT_ARGB16161616F:
4521 	case DRM_FORMAT_XBGR16161616F:
4522 	case DRM_FORMAT_ABGR16161616F:
4523 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4524 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4525 		break;
4526 
4527 	default:
4528 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4529 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4530 		break;
4531 	}
4532 
4533 	/*
4534 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4535 	 * scaling factor of 1.0 == 1000 units.
4536 	 */
4537 	if (*max_upscale == 1)
4538 		*max_upscale = 1000;
4539 
4540 	if (*min_downscale == 1)
4541 		*min_downscale = 1000;
4542 }
4543 
4544 
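/*
 * Translates the DRM plane state src/dst rectangles into dc_scaling_info,
 * rejecting degenerate rectangles and scaling ratios outside the per-format
 * caps.
 */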
4545 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4546 				struct dc_scaling_info *scaling_info)
4547 {
4548 	int scale_w, scale_h, min_downscale, max_upscale;
4549 
4550 	memset(scaling_info, 0, sizeof(*scaling_info));
4551 
4552 	/* Source is fixed-point 16.16, but we ignore the fractional part for now. */
4553 	scaling_info->src_rect.x = state->src_x >> 16;
4554 	scaling_info->src_rect.y = state->src_y >> 16;
4555 
4556 	/*
4557 	 * For reasons we don't (yet) fully understand a non-zero
4558 	 * src_y coordinate into an NV12 buffer can cause a
4559 	 * system hang. To avoid hangs (and maybe be overly cautious)
4560 	 * let's reject both non-zero src_x and src_y.
4561 	 *
4562 	 * We currently know of only one use-case to reproduce a
4563 	 * scenario with non-zero src_x and src_y for NV12, which
4564 	 * is to gesture the YouTube Android app into full screen
4565 	 * on ChromeOS.
4566 	 */
4567 	if (state->fb &&
4568 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4569 	    (scaling_info->src_rect.x != 0 ||
4570 	     scaling_info->src_rect.y != 0))
4571 		return -EINVAL;
4572 
4573 	scaling_info->src_rect.width = state->src_w >> 16;
4574 	if (scaling_info->src_rect.width == 0)
4575 		return -EINVAL;
4576 
4577 	scaling_info->src_rect.height = state->src_h >> 16;
4578 	if (scaling_info->src_rect.height == 0)
4579 		return -EINVAL;
4580 
4581 	scaling_info->dst_rect.x = state->crtc_x;
4582 	scaling_info->dst_rect.y = state->crtc_y;
4583 
4584 	if (state->crtc_w == 0)
4585 		return -EINVAL;
4586 
4587 	scaling_info->dst_rect.width = state->crtc_w;
4588 
4589 	if (state->crtc_h == 0)
4590 		return -EINVAL;
4591 
4592 	scaling_info->dst_rect.height = state->crtc_h;
4593 
4594 	/* DRM doesn't specify clipping on destination output. */
4595 	scaling_info->clip_rect = scaling_info->dst_rect;
4596 
4597 	/* Validate scaling per-format with DC plane caps */
4598 	if (state->plane && state->plane->dev && state->fb) {
4599 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4600 					     &min_downscale, &max_upscale);
4601 	} else {
4602 		min_downscale = 250;
4603 		max_upscale = 16000;
4604 	}
4605 
4606 	scale_w = scaling_info->dst_rect.width * 1000 /
4607 		  scaling_info->src_rect.width;
4608 
4609 	if (scale_w < min_downscale || scale_w > max_upscale)
4610 		return -EINVAL;
4611 
4612 	scale_h = scaling_info->dst_rect.height * 1000 /
4613 		  scaling_info->src_rect.height;
4614 
4615 	if (scale_h < min_downscale || scale_h > max_upscale)
4616 		return -EINVAL;
4617 
4618 	/*
4619 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4620 	 * assume reasonable defaults based on the format.
4621 	 */
4622 
4623 	return 0;
4624 }
4625 
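/* Decodes the legacy GFX8 tiling flags into DC's gfx8 tiling description. */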
4626 static void
4627 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4628 				 uint64_t tiling_flags)
4629 {
4630 	/* Fill GFX8 params */
4631 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4632 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4633 
4634 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4635 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4636 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4637 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4638 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4639 
4640 		/* XXX fix me for VI */
4641 		tiling_info->gfx8.num_banks = num_banks;
4642 		tiling_info->gfx8.array_mode =
4643 				DC_ARRAY_2D_TILED_THIN1;
4644 		tiling_info->gfx8.tile_split = tile_split;
4645 		tiling_info->gfx8.bank_width = bankw;
4646 		tiling_info->gfx8.bank_height = bankh;
4647 		tiling_info->gfx8.tile_aspect = mtaspect;
4648 		tiling_info->gfx8.tile_mode =
4649 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4650 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4651 			== DC_ARRAY_1D_TILED_THIN1) {
4652 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4653 	}
4654 
4655 	tiling_info->gfx8.pipe_config =
4656 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4657 }
4658 
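/* Fills the GFX9+ tiling parameters from the device's gb_addr_config. */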
4659 static void
4660 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4661 				  union dc_tiling_info *tiling_info)
4662 {
4663 	tiling_info->gfx9.num_pipes =
4664 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4665 	tiling_info->gfx9.num_banks =
4666 		adev->gfx.config.gb_addr_config_fields.num_banks;
4667 	tiling_info->gfx9.pipe_interleave =
4668 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4669 	tiling_info->gfx9.num_shader_engines =
4670 		adev->gfx.config.gb_addr_config_fields.num_se;
4671 	tiling_info->gfx9.max_compressed_frags =
4672 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4673 	tiling_info->gfx9.num_rb_per_se =
4674 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4675 	tiling_info->gfx9.shaderEnable = 1;
4676 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4677 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4678 }
4679 
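/*
 * Checks the requested DCC parameters against what DC reports as supported
 * for this format, surface size and swizzle; returns -EINVAL if DCC cannot
 * be used as configured.
 */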
4680 static int
4681 validate_dcc(struct amdgpu_device *adev,
4682 	     const enum surface_pixel_format format,
4683 	     const enum dc_rotation_angle rotation,
4684 	     const union dc_tiling_info *tiling_info,
4685 	     const struct dc_plane_dcc_param *dcc,
4686 	     const struct dc_plane_address *address,
4687 	     const struct plane_size *plane_size)
4688 {
4689 	struct dc *dc = adev->dm.dc;
4690 	struct dc_dcc_surface_param input;
4691 	struct dc_surface_dcc_cap output;
4692 
4693 	memset(&input, 0, sizeof(input));
4694 	memset(&output, 0, sizeof(output));
4695 
4696 	if (!dcc->enable)
4697 		return 0;
4698 
4699 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4700 	    !dc->cap_funcs.get_dcc_compression_cap)
4701 		return -EINVAL;
4702 
4703 	input.format = format;
4704 	input.surface_size.width = plane_size->surface_size.width;
4705 	input.surface_size.height = plane_size->surface_size.height;
4706 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4707 
4708 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4709 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4710 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4711 		input.scan = SCAN_DIRECTION_VERTICAL;
4712 
4713 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4714 		return -EINVAL;
4715 
4716 	if (!output.capable)
4717 		return -EINVAL;
4718 
4719 	if (dcc->independent_64b_blks == 0 &&
4720 	    output.grph.rgb.independent_64b_blks != 0)
4721 		return -EINVAL;
4722 
4723 	return 0;
4724 }
4725 
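/* True if the AMD format modifier requests DCC compression. */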
4726 static bool
4727 modifier_has_dcc(uint64_t modifier)
4728 {
4729 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4730 }
4731 
4732 static unsigned
4733 modifier_gfx9_swizzle_mode(uint64_t modifier)
4734 {
4735 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4736 		return 0;
4737 
4738 	return AMD_FMT_MOD_GET(TILE, modifier);
4739 }
4740 
4741 static const struct drm_format_info *
4742 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4743 {
4744 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4745 }
4746 
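/*
 * Derives GFX9+ tiling parameters from an AMD format modifier, overriding
 * the device defaults with the pipe/bank/packer layout encoded in the
 * modifier.
 */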
4747 static void
4748 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4749 				    union dc_tiling_info *tiling_info,
4750 				    uint64_t modifier)
4751 {
4752 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4753 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4754 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4755 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4756 
4757 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4758 
4759 	if (!IS_AMD_FMT_MOD(modifier))
4760 		return;
4761 
4762 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4763 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4764 
4765 	if (adev->family >= AMDGPU_FAMILY_NV) {
4766 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4767 	} else {
4768 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4769 
4770 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4771 	}
4772 }
4773 
4774 enum dm_micro_swizzle {
4775 	MICRO_SWIZZLE_Z = 0,
4776 	MICRO_SWIZZLE_S = 1,
4777 	MICRO_SWIZZLE_D = 2,
4778 	MICRO_SWIZZLE_R = 3
4779 };
4780 
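/*
 * Checks whether the plane can scan out the given format + modifier
 * combination: LINEAR/INVALID are always accepted, otherwise the modifier
 * must be in the plane's list and satisfy the per-format swizzle and DCC
 * restrictions below.
 */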
4781 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4782 					  uint32_t format,
4783 					  uint64_t modifier)
4784 {
4785 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4786 	const struct drm_format_info *info = drm_format_info(format);
4787 	int i;
4788 
4789 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4790 
4791 	if (!info)
4792 		return false;
4793 
4794 	/*
4795 	 * We always have to allow these modifiers:
4796 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4797 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4798 	 */
4799 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4800 	    modifier == DRM_FORMAT_MOD_INVALID) {
4801 		return true;
4802 	}
4803 
4804 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4805 	for (i = 0; i < plane->modifier_count; i++) {
4806 		if (modifier == plane->modifiers[i])
4807 			break;
4808 	}
4809 	if (i == plane->modifier_count)
4810 		return false;
4811 
4812 	/*
4813 	 * For D swizzle the canonical modifier depends on the bpp, so check
4814 	 * it here.
4815 	 */
4816 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4817 	    adev->family >= AMDGPU_FAMILY_NV) {
4818 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4819 			return false;
4820 	}
4821 
4822 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4823 	    info->cpp[0] < 8)
4824 		return false;
4825 
4826 	if (modifier_has_dcc(modifier)) {
4827 		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4828 		if (info->cpp[0] != 4)
4829 			return false;
4830 		/* We support multi-planar formats, but not when combined with
4831 		 * additional DCC metadata planes. */
4832 		if (info->num_planes > 1)
4833 			return false;
4834 	}
4835 
4836 	return true;
4837 }
4838 
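/* Appends a modifier to the dynamically grown list, doubling its capacity as needed. */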
4839 static void
4840 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4841 {
4842 	if (!*mods)
4843 		return;
4844 
4845 	if (*cap - *size < 1) {
4846 		uint64_t new_cap = *cap * 2;
4847 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4848 
4849 		if (!new_mods) {
4850 			kfree(*mods);
4851 			*mods = NULL;
4852 			return;
4853 		}
4854 
4855 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4856 		kfree(*mods);
4857 		*mods = new_mods;
4858 		*cap = new_cap;
4859 	}
4860 
4861 	(*mods)[*size] = mod;
4862 	*size += 1;
4863 }
4864 
4865 static void
4866 add_gfx9_modifiers(const struct amdgpu_device *adev,
4867 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4868 {
4869 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4870 	int pipe_xor_bits = min(8, pipes +
4871 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4872 	int bank_xor_bits = min(8 - pipe_xor_bits,
4873 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4874 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4875 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4876 
4877 
4878 	if (adev->family == AMDGPU_FAMILY_RV) {
4879 		/* Raven2 and later */
4880 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4881 
4882 		/*
4883 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4884 		 * doesn't support _D on DCN
4885 		 */
4886 
4887 		if (has_constant_encode) {
4888 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4889 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4890 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4891 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4892 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4893 				    AMD_FMT_MOD_SET(DCC, 1) |
4894 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4895 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4896 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4897 		}
4898 
4899 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4900 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4901 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4902 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4903 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4904 			    AMD_FMT_MOD_SET(DCC, 1) |
4905 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4906 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4907 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4908 
4909 		if (has_constant_encode) {
4910 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4911 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4912 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4913 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4914 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4915 				    AMD_FMT_MOD_SET(DCC, 1) |
4916 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4917 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4918 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4919 
4920 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4921 				    AMD_FMT_MOD_SET(RB, rb) |
4922 				    AMD_FMT_MOD_SET(PIPE, pipes));
4923 		}
4924 
4925 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4926 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4927 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4928 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4929 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4930 			    AMD_FMT_MOD_SET(DCC, 1) |
4931 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4932 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4933 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4934 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4935 			    AMD_FMT_MOD_SET(RB, rb) |
4936 			    AMD_FMT_MOD_SET(PIPE, pipes));
4937 	}
4938 
4939 	/*
4940 	 * Only supported for 64bpp on Raven, will be filtered on format in
4941 	 * dm_plane_format_mod_supported.
4942 	 */
4943 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4944 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4945 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4946 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4947 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4948 
4949 	if (adev->family == AMDGPU_FAMILY_RV) {
4950 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4951 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4952 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4953 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4954 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4955 	}
4956 
4957 	/*
4958 	 * Only supported for 64bpp on Raven, will be filtered on format in
4959 	 * dm_plane_format_mod_supported.
4960 	 */
4961 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4962 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4963 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4964 
4965 	if (adev->family == AMDGPU_FAMILY_RV) {
4966 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4967 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4968 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4969 	}
4970 }
4971 
4972 static void
4973 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4974 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4975 {
4976 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4977 
4978 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4979 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4980 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4981 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4982 		    AMD_FMT_MOD_SET(DCC, 1) |
4983 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4984 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4985 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4986 
4987 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4988 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4989 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4990 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4991 		    AMD_FMT_MOD_SET(DCC, 1) |
4992 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4993 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4994 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4995 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4996 
4997 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4998 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4999 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5000 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5001 
5002 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5003 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5004 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5005 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5006 
5007 
5008 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5009 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5011 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5012 
5013 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5014 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5015 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5016 }
5017 
5018 static void
5019 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5020 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5021 {
5022 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5023 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5024 
5025 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5026 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5027 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5028 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5029 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5030 		    AMD_FMT_MOD_SET(DCC, 1) |
5031 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5032 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5033 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5034 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5035 
5036 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5037 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5038 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5039 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5040 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5041 		    AMD_FMT_MOD_SET(DCC, 1) |
5042 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5043 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5044 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5045 
5046 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5047 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5048 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5049 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5050 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5051 		    AMD_FMT_MOD_SET(DCC, 1) |
5052 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5053 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5054 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5055 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5056 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5057 
5058 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5059 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5060 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5061 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5062 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5063 		    AMD_FMT_MOD_SET(DCC, 1) |
5064 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5065 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5066 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5067 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5068 
5069 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5070 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5071 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5072 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5073 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5074 
5075 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5076 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5077 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5078 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5079 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5080 
5081 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5082 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5083 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5084 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5085 
5086 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5088 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5089 }
5090 
5091 static int
5092 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5093 {
5094 	uint64_t size = 0, capacity = 128;
5095 	*mods = NULL;
5096 
5097 	/* We have not hooked up any pre-GFX9 modifiers. */
5098 	if (adev->family < AMDGPU_FAMILY_AI)
5099 		return 0;
5100 
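	/* A NULL allocation is caught below and reported as -ENOMEM. */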
5101 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5102 
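	/* Cursor planes only take linear surfaces; terminate the list right away. */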
5103 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5104 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5105 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5106 		return *mods ? 0 : -ENOMEM;
5107 	}
5108 
5109 	switch (adev->family) {
5110 	case AMDGPU_FAMILY_AI:
5111 	case AMDGPU_FAMILY_RV:
5112 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5113 		break;
5114 	case AMDGPU_FAMILY_NV:
5115 	case AMDGPU_FAMILY_VGH:
5116 	case AMDGPU_FAMILY_YC:
5117 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5118 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5119 		else
5120 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5121 		break;
5122 	}
5123 
5124 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5125 
5126 	/* INVALID marks the end of the list. */
5127 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5128 
5129 	if (!*mods)
5130 		return -ENOMEM;
5131 
5132 	return 0;
5133 }
5134 
5135 static int
5136 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5137 					  const struct amdgpu_framebuffer *afb,
5138 					  const enum surface_pixel_format format,
5139 					  const enum dc_rotation_angle rotation,
5140 					  const struct plane_size *plane_size,
5141 					  union dc_tiling_info *tiling_info,
5142 					  struct dc_plane_dcc_param *dcc,
5143 					  struct dc_plane_address *address,
5144 					  const bool force_disable_dcc)
5145 {
5146 	const uint64_t modifier = afb->base.modifier;
5147 	int ret = 0;
5148 
5149 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5150 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5151 
5152 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
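		/* DCC metadata is carried in the framebuffer's second plane. */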
5153 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5154 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5155 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5156 
5157 		dcc->enable = 1;
5158 		dcc->meta_pitch = afb->base.pitches[1];
5159 		dcc->independent_64b_blks = independent_64b_blks;
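		/*
		 * RB+ (gfx10.3) modifiers may constrain DCC to 64B and/or 128B
		 * independent blocks; map those bits to the matching HUBP setting.
		 */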
5160 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5161 			if (independent_64b_blks && independent_128b_blks)
5162 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5163 			else if (independent_128b_blks)
5164 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5165 			else if (independent_64b_blks && !independent_128b_blks)
5166 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5167 			else
5168 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5169 		} else {
5170 			if (independent_64b_blks)
5171 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5172 			else
5173 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5174 		}
5175 
5176 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5177 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5178 	}
5179 
5180 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5181 	if (ret)
5182 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5183 
5184 	return ret;
5185 }
5186 
5187 static int
5188 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5189 			     const struct amdgpu_framebuffer *afb,
5190 			     const enum surface_pixel_format format,
5191 			     const enum dc_rotation_angle rotation,
5192 			     const uint64_t tiling_flags,
5193 			     union dc_tiling_info *tiling_info,
5194 			     struct plane_size *plane_size,
5195 			     struct dc_plane_dcc_param *dcc,
5196 			     struct dc_plane_address *address,
5197 			     bool tmz_surface,
5198 			     bool force_disable_dcc)
5199 {
5200 	const struct drm_framebuffer *fb = &afb->base;
5201 	int ret;
5202 
5203 	memset(tiling_info, 0, sizeof(*tiling_info));
5204 	memset(plane_size, 0, sizeof(*plane_size));
5205 	memset(dcc, 0, sizeof(*dcc));
5206 	memset(address, 0, sizeof(*address));
5207 
5208 	address->tmz_surface = tmz_surface;
5209 
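	/* Single-plane RGB surfaces vs. two-plane (luma/chroma) video surfaces. */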
5210 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5211 		uint64_t addr = afb->address + fb->offsets[0];
5212 
5213 		plane_size->surface_size.x = 0;
5214 		plane_size->surface_size.y = 0;
5215 		plane_size->surface_size.width = fb->width;
5216 		plane_size->surface_size.height = fb->height;
5217 		plane_size->surface_pitch =
5218 			fb->pitches[0] / fb->format->cpp[0];
5219 
5220 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5221 		address->grph.addr.low_part = lower_32_bits(addr);
5222 		address->grph.addr.high_part = upper_32_bits(addr);
5223 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5224 		uint64_t luma_addr = afb->address + fb->offsets[0];
5225 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5226 
5227 		plane_size->surface_size.x = 0;
5228 		plane_size->surface_size.y = 0;
5229 		plane_size->surface_size.width = fb->width;
5230 		plane_size->surface_size.height = fb->height;
5231 		plane_size->surface_pitch =
5232 			fb->pitches[0] / fb->format->cpp[0];
5233 
5234 		plane_size->chroma_size.x = 0;
5235 		plane_size->chroma_size.y = 0;
5236 		/* TODO: set these based on surface format */
5237 		plane_size->chroma_size.width = fb->width / 2;
5238 		plane_size->chroma_size.height = fb->height / 2;
5239 
5240 		plane_size->chroma_pitch =
5241 			fb->pitches[1] / fb->format->cpp[1];
5242 
5243 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5244 		address->video_progressive.luma_addr.low_part =
5245 			lower_32_bits(luma_addr);
5246 		address->video_progressive.luma_addr.high_part =
5247 			upper_32_bits(luma_addr);
5248 		address->video_progressive.chroma_addr.low_part =
5249 			lower_32_bits(chroma_addr);
5250 		address->video_progressive.chroma_addr.high_part =
5251 			upper_32_bits(chroma_addr);
5252 	}
5253 
5254 	if (adev->family >= AMDGPU_FAMILY_AI) {
5255 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5256 								rotation, plane_size,
5257 								tiling_info, dcc,
5258 								address,
5259 								force_disable_dcc);
5260 		if (ret)
5261 			return ret;
5262 	} else {
5263 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5264 	}
5265 
5266 	return 0;
5267 }
5268 
5269 static void
5270 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5271 			       bool *per_pixel_alpha, bool *global_alpha,
5272 			       int *global_alpha_value)
5273 {
5274 	*per_pixel_alpha = false;
5275 	*global_alpha = false;
5276 	*global_alpha_value = 0xff;
5277 
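	/* Blending attributes only apply to overlay planes; keep the defaults otherwise. */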
5278 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5279 		return;
5280 
5281 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5282 		static const uint32_t alpha_formats[] = {
5283 			DRM_FORMAT_ARGB8888,
5284 			DRM_FORMAT_RGBA8888,
5285 			DRM_FORMAT_ABGR8888,
5286 		};
5287 		uint32_t format = plane_state->fb->format->format;
5288 		unsigned int i;
5289 
5290 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5291 			if (format == alpha_formats[i]) {
5292 				*per_pixel_alpha = true;
5293 				break;
5294 			}
5295 		}
5296 	}
5297 
5298 	if (plane_state->alpha < 0xffff) {
5299 		*global_alpha = true;
5300 		*global_alpha_value = plane_state->alpha >> 8;
5301 	}
5302 }
5303 
5304 static int
5305 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5306 			    const enum surface_pixel_format format,
5307 			    enum dc_color_space *color_space)
5308 {
5309 	bool full_range;
5310 
5311 	*color_space = COLOR_SPACE_SRGB;
5312 
5313 	/* DRM color properties only affect non-RGB formats. */
5314 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5315 		return 0;
5316 
5317 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5318 
5319 	switch (plane_state->color_encoding) {
5320 	case DRM_COLOR_YCBCR_BT601:
5321 		if (full_range)
5322 			*color_space = COLOR_SPACE_YCBCR601;
5323 		else
5324 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5325 		break;
5326 
5327 	case DRM_COLOR_YCBCR_BT709:
5328 		if (full_range)
5329 			*color_space = COLOR_SPACE_YCBCR709;
5330 		else
5331 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5332 		break;
5333 
5334 	case DRM_COLOR_YCBCR_BT2020:
5335 		if (full_range)
5336 			*color_space = COLOR_SPACE_2020_YCBCR;
5337 		else
5338 			return -EINVAL;
5339 		break;
5340 
5341 	default:
5342 		return -EINVAL;
5343 	}
5344 
5345 	return 0;
5346 }
5347 
5348 static int
5349 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5350 			    const struct drm_plane_state *plane_state,
5351 			    const uint64_t tiling_flags,
5352 			    struct dc_plane_info *plane_info,
5353 			    struct dc_plane_address *address,
5354 			    bool tmz_surface,
5355 			    bool force_disable_dcc)
5356 {
5357 	const struct drm_framebuffer *fb = plane_state->fb;
5358 	const struct amdgpu_framebuffer *afb =
5359 		to_amdgpu_framebuffer(plane_state->fb);
5360 	int ret;
5361 
5362 	memset(plane_info, 0, sizeof(*plane_info));
5363 
5364 	switch (fb->format->format) {
5365 	case DRM_FORMAT_C8:
5366 		plane_info->format =
5367 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5368 		break;
5369 	case DRM_FORMAT_RGB565:
5370 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5371 		break;
5372 	case DRM_FORMAT_XRGB8888:
5373 	case DRM_FORMAT_ARGB8888:
5374 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5375 		break;
5376 	case DRM_FORMAT_XRGB2101010:
5377 	case DRM_FORMAT_ARGB2101010:
5378 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5379 		break;
5380 	case DRM_FORMAT_XBGR2101010:
5381 	case DRM_FORMAT_ABGR2101010:
5382 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5383 		break;
5384 	case DRM_FORMAT_XBGR8888:
5385 	case DRM_FORMAT_ABGR8888:
5386 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5387 		break;
5388 	case DRM_FORMAT_NV21:
5389 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5390 		break;
5391 	case DRM_FORMAT_NV12:
5392 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5393 		break;
5394 	case DRM_FORMAT_P010:
5395 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5396 		break;
5397 	case DRM_FORMAT_XRGB16161616F:
5398 	case DRM_FORMAT_ARGB16161616F:
5399 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5400 		break;
5401 	case DRM_FORMAT_XBGR16161616F:
5402 	case DRM_FORMAT_ABGR16161616F:
5403 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5404 		break;
5405 	case DRM_FORMAT_XRGB16161616:
5406 	case DRM_FORMAT_ARGB16161616:
5407 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5408 		break;
5409 	case DRM_FORMAT_XBGR16161616:
5410 	case DRM_FORMAT_ABGR16161616:
5411 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5412 		break;
5413 	default:
5414 		DRM_ERROR(
5415 			"Unsupported screen format %p4cc\n",
5416 			&fb->format->format);
5417 		return -EINVAL;
5418 	}
5419 
5420 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5421 	case DRM_MODE_ROTATE_0:
5422 		plane_info->rotation = ROTATION_ANGLE_0;
5423 		break;
5424 	case DRM_MODE_ROTATE_90:
5425 		plane_info->rotation = ROTATION_ANGLE_90;
5426 		break;
5427 	case DRM_MODE_ROTATE_180:
5428 		plane_info->rotation = ROTATION_ANGLE_180;
5429 		break;
5430 	case DRM_MODE_ROTATE_270:
5431 		plane_info->rotation = ROTATION_ANGLE_270;
5432 		break;
5433 	default:
5434 		plane_info->rotation = ROTATION_ANGLE_0;
5435 		break;
5436 	}
5437 
5438 	plane_info->visible = true;
5439 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5440 
5441 	plane_info->layer_index = 0;
5442 
5443 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5444 					  &plane_info->color_space);
5445 	if (ret)
5446 		return ret;
5447 
5448 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5449 					   plane_info->rotation, tiling_flags,
5450 					   &plane_info->tiling_info,
5451 					   &plane_info->plane_size,
5452 					   &plane_info->dcc, address, tmz_surface,
5453 					   force_disable_dcc);
5454 	if (ret)
5455 		return ret;
5456 
5457 	fill_blending_from_plane_state(
5458 		plane_state, &plane_info->per_pixel_alpha,
5459 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5460 
5461 	return 0;
5462 }
5463 
5464 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5465 				    struct dc_plane_state *dc_plane_state,
5466 				    struct drm_plane_state *plane_state,
5467 				    struct drm_crtc_state *crtc_state)
5468 {
5469 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5470 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5471 	struct dc_scaling_info scaling_info;
5472 	struct dc_plane_info plane_info;
5473 	int ret;
5474 	bool force_disable_dcc = false;
5475 
5476 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5477 	if (ret)
5478 		return ret;
5479 
5480 	dc_plane_state->src_rect = scaling_info.src_rect;
5481 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5482 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5483 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5484 
5485 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5486 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5487 					  afb->tiling_flags,
5488 					  &plane_info,
5489 					  &dc_plane_state->address,
5490 					  afb->tmz_surface,
5491 					  force_disable_dcc);
5492 	if (ret)
5493 		return ret;
5494 
5495 	dc_plane_state->format = plane_info.format;
5496 	dc_plane_state->color_space = plane_info.color_space;
5498 	dc_plane_state->plane_size = plane_info.plane_size;
5499 	dc_plane_state->rotation = plane_info.rotation;
5500 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5501 	dc_plane_state->stereo_format = plane_info.stereo_format;
5502 	dc_plane_state->tiling_info = plane_info.tiling_info;
5503 	dc_plane_state->visible = plane_info.visible;
5504 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5505 	dc_plane_state->global_alpha = plane_info.global_alpha;
5506 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5507 	dc_plane_state->dcc = plane_info.dcc;
5508 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5509 	dc_plane_state->flip_int_enabled = true;
5510 
5511 	/*
5512 	 * Always set input transfer function, since plane state is refreshed
5513 	 * every time.
5514 	 */
5515 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5516 	if (ret)
5517 		return ret;
5518 
5519 	return 0;
5520 }
5521 
5522 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5523 					   const struct dm_connector_state *dm_state,
5524 					   struct dc_stream_state *stream)
5525 {
5526 	enum amdgpu_rmx_type rmx_type;
5527 
5528 	struct rect src = { 0 }; /* viewport in composition space*/
5529 	struct rect dst = { 0 }; /* stream addressable area */
5530 
5531 	/* no mode. nothing to be done */
5532 	if (!mode)
5533 		return;
5534 
5535 	/* Full screen scaling by default */
5536 	src.width = mode->hdisplay;
5537 	src.height = mode->vdisplay;
5538 	dst.width = stream->timing.h_addressable;
5539 	dst.height = stream->timing.v_addressable;
5540 
5541 	if (dm_state) {
5542 		rmx_type = dm_state->scaling;
5543 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5544 			if (src.width * dst.height <
5545 					src.height * dst.width) {
5546 				/* height needs less upscaling/more downscaling */
5547 				dst.width = src.width *
5548 						dst.height / src.height;
5549 			} else {
5550 				/* width needs less upscaling/more downscaling */
5551 				dst.height = src.height *
5552 						dst.width / src.width;
5553 			}
5554 		} else if (rmx_type == RMX_CENTER) {
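			/* 1:1 pixel mapping; the centering offsets are computed below. */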
5555 			dst = src;
5556 		}
5557 
5558 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5559 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5560 
5561 		if (dm_state->underscan_enable) {
5562 			dst.x += dm_state->underscan_hborder / 2;
5563 			dst.y += dm_state->underscan_vborder / 2;
5564 			dst.width -= dm_state->underscan_hborder;
5565 			dst.height -= dm_state->underscan_vborder;
5566 		}
5567 	}
5568 
5569 	stream->src = src;
5570 	stream->dst = dst;
5571 
5572 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5573 		      dst.x, dst.y, dst.width, dst.height);
5574 
5575 }
5576 
5577 static enum dc_color_depth
5578 convert_color_depth_from_display_info(const struct drm_connector *connector,
5579 				      bool is_y420, int requested_bpc)
5580 {
5581 	uint8_t bpc;
5582 
5583 	if (is_y420) {
5584 		bpc = 8;
5585 
5586 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5587 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5588 			bpc = 16;
5589 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5590 			bpc = 12;
5591 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5592 			bpc = 10;
5593 	} else {
5594 		bpc = (uint8_t)connector->display_info.bpc;
5595 		/* Assume 8 bpc by default if no bpc is specified. */
5596 		bpc = bpc ? bpc : 8;
5597 	}
5598 
5599 	if (requested_bpc > 0) {
5600 		/*
5601 		 * Cap display bpc based on the user requested value.
5602 		 *
5603 		 * The value for state->max_bpc may not be correctly updated
5604 		 * depending on when the connector gets added to the state
5605 		 * or if this was called outside of atomic check, so it
5606 		 * can't be used directly.
5607 		 */
5608 		bpc = min_t(u8, bpc, requested_bpc);
5609 
5610 		/* Round down to the nearest even number. */
5611 		bpc = bpc - (bpc & 1);
5612 	}
5613 
5614 	switch (bpc) {
5615 	case 0:
5616 		/*
5617 		 * Temporary Work around, DRM doesn't parse color depth for
5618 		 * EDID revision before 1.4
5619 		 * TODO: Fix edid parsing
5620 		 */
5621 		return COLOR_DEPTH_888;
5622 	case 6:
5623 		return COLOR_DEPTH_666;
5624 	case 8:
5625 		return COLOR_DEPTH_888;
5626 	case 10:
5627 		return COLOR_DEPTH_101010;
5628 	case 12:
5629 		return COLOR_DEPTH_121212;
5630 	case 14:
5631 		return COLOR_DEPTH_141414;
5632 	case 16:
5633 		return COLOR_DEPTH_161616;
5634 	default:
5635 		return COLOR_DEPTH_UNDEFINED;
5636 	}
5637 }
5638 
5639 static enum dc_aspect_ratio
5640 get_aspect_ratio(const struct drm_display_mode *mode_in)
5641 {
5642 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5643 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5644 }
5645 
5646 static enum dc_color_space
5647 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5648 {
5649 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5650 
5651 	switch (dc_crtc_timing->pixel_encoding)	{
5652 	case PIXEL_ENCODING_YCBCR422:
5653 	case PIXEL_ENCODING_YCBCR444:
5654 	case PIXEL_ENCODING_YCBCR420:
5655 	{
5656 		/*
5657 		 * Per the HDMI spec, 27.03 MHz is the separation point between
5658 		 * HDTV and SDTV: use YCbCr709 above that pixel clock and
5659 		 * YCbCr601 below it.
5660 		 */
5661 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5662 			if (dc_crtc_timing->flags.Y_ONLY)
5663 				color_space =
5664 					COLOR_SPACE_YCBCR709_LIMITED;
5665 			else
5666 				color_space = COLOR_SPACE_YCBCR709;
5667 		} else {
5668 			if (dc_crtc_timing->flags.Y_ONLY)
5669 				color_space =
5670 					COLOR_SPACE_YCBCR601_LIMITED;
5671 			else
5672 				color_space = COLOR_SPACE_YCBCR601;
5673 		}
5674 
5675 	}
5676 	break;
5677 	case PIXEL_ENCODING_RGB:
5678 		color_space = COLOR_SPACE_SRGB;
5679 		break;
5680 
5681 	default:
5682 		WARN_ON(1);
5683 		break;
5684 	}
5685 
5686 	return color_space;
5687 }
5688 
5689 static bool adjust_colour_depth_from_display_info(
5690 	struct dc_crtc_timing *timing_out,
5691 	const struct drm_display_info *info)
5692 {
5693 	enum dc_color_depth depth = timing_out->display_color_depth;
5694 	int normalized_clk;
5695 	do {
5696 		normalized_clk = timing_out->pix_clk_100hz / 10;
5697 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5698 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5699 			normalized_clk /= 2;
5700 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5701 		switch (depth) {
5702 		case COLOR_DEPTH_888:
5703 			break;
5704 		case COLOR_DEPTH_101010:
5705 			normalized_clk = (normalized_clk * 30) / 24;
5706 			break;
5707 		case COLOR_DEPTH_121212:
5708 			normalized_clk = (normalized_clk * 36) / 24;
5709 			break;
5710 		case COLOR_DEPTH_161616:
5711 			normalized_clk = (normalized_clk * 48) / 24;
5712 			break;
5713 		default:
5714 			/* The above depths are the only ones valid for HDMI. */
5715 			return false;
5716 		}
5717 		if (normalized_clk <= info->max_tmds_clock) {
5718 			timing_out->display_color_depth = depth;
5719 			return true;
5720 		}
5721 	} while (--depth > COLOR_DEPTH_666);
5722 	return false;
5723 }
5724 
5725 static void fill_stream_properties_from_drm_display_mode(
5726 	struct dc_stream_state *stream,
5727 	const struct drm_display_mode *mode_in,
5728 	const struct drm_connector *connector,
5729 	const struct drm_connector_state *connector_state,
5730 	const struct dc_stream_state *old_stream,
5731 	int requested_bpc)
5732 {
5733 	struct dc_crtc_timing *timing_out = &stream->timing;
5734 	const struct drm_display_info *info = &connector->display_info;
5735 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5736 	struct hdmi_vendor_infoframe hv_frame;
5737 	struct hdmi_avi_infoframe avi_frame;
5738 
5739 	memset(&hv_frame, 0, sizeof(hv_frame));
5740 	memset(&avi_frame, 0, sizeof(avi_frame));
5741 
5742 	timing_out->h_border_left = 0;
5743 	timing_out->h_border_right = 0;
5744 	timing_out->v_border_top = 0;
5745 	timing_out->v_border_bottom = 0;
5746 	/* TODO: un-hardcode */
5747 	if (drm_mode_is_420_only(info, mode_in)
5748 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5749 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5750 	else if (drm_mode_is_420_also(info, mode_in)
5751 			&& aconnector->force_yuv420_output)
5752 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5753 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5754 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5755 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5756 	else
5757 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5758 
5759 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5760 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5761 		connector,
5762 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5763 		requested_bpc);
5764 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5765 	timing_out->hdmi_vic = 0;
5766 
5767 	if (old_stream) {
5768 		timing_out->vic = old_stream->timing.vic;
5769 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5770 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5771 	} else {
5772 		timing_out->vic = drm_match_cea_mode(mode_in);
5773 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5774 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5775 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5776 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5777 	}
5778 
5779 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5780 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5781 		timing_out->vic = avi_frame.video_code;
5782 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5783 		timing_out->hdmi_vic = hv_frame.vic;
5784 	}
5785 
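	/*
	 * FreeSync video modes take their timings straight from the requested
	 * mode; everything else uses the crtc_* (adjusted) timings.
	 */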
5786 	if (is_freesync_video_mode(mode_in, aconnector)) {
5787 		timing_out->h_addressable = mode_in->hdisplay;
5788 		timing_out->h_total = mode_in->htotal;
5789 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5790 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5791 		timing_out->v_total = mode_in->vtotal;
5792 		timing_out->v_addressable = mode_in->vdisplay;
5793 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5794 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5795 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5796 	} else {
5797 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5798 		timing_out->h_total = mode_in->crtc_htotal;
5799 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5800 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5801 		timing_out->v_total = mode_in->crtc_vtotal;
5802 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5803 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5804 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5805 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5806 	}
5807 
5808 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5809 
5810 	stream->output_color_space = get_output_color_space(timing_out);
5811 
5812 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5813 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5814 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5815 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5816 		    drm_mode_is_420_also(info, mode_in) &&
5817 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5818 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5819 			adjust_colour_depth_from_display_info(timing_out, info);
5820 		}
5821 	}
5822 }
5823 
5824 static void fill_audio_info(struct audio_info *audio_info,
5825 			    const struct drm_connector *drm_connector,
5826 			    const struct dc_sink *dc_sink)
5827 {
5828 	int i = 0;
5829 	int cea_revision = 0;
5830 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5831 
5832 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5833 	audio_info->product_id = edid_caps->product_id;
5834 
5835 	cea_revision = drm_connector->display_info.cea_rev;
5836 
5837 	strscpy(audio_info->display_name,
5838 		edid_caps->display_name,
5839 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5840 
5841 	if (cea_revision >= 3) {
5842 		audio_info->mode_count = edid_caps->audio_mode_count;
5843 
5844 		for (i = 0; i < audio_info->mode_count; ++i) {
5845 			audio_info->modes[i].format_code =
5846 					(enum audio_format_code)
5847 					(edid_caps->audio_modes[i].format_code);
5848 			audio_info->modes[i].channel_count =
5849 					edid_caps->audio_modes[i].channel_count;
5850 			audio_info->modes[i].sample_rates.all =
5851 					edid_caps->audio_modes[i].sample_rate;
5852 			audio_info->modes[i].sample_size =
5853 					edid_caps->audio_modes[i].sample_size;
5854 		}
5855 	}
5856 
5857 	audio_info->flags.all = edid_caps->speaker_flags;
5858 
5859 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5860 	if (drm_connector->latency_present[0]) {
5861 		audio_info->video_latency = drm_connector->video_latency[0];
5862 		audio_info->audio_latency = drm_connector->audio_latency[0];
5863 	}
5864 
5865 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5866 
5867 }
5868 
5869 static void
5870 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5871 				      struct drm_display_mode *dst_mode)
5872 {
5873 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5874 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5875 	dst_mode->crtc_clock = src_mode->crtc_clock;
5876 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5877 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5878 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5879 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5880 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5881 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5882 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5883 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5884 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5885 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5886 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5887 }
5888 
5889 static void
5890 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5891 					const struct drm_display_mode *native_mode,
5892 					bool scale_enabled)
5893 {
5894 	if (scale_enabled) {
5895 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5896 	} else if (native_mode->clock == drm_mode->clock &&
5897 			native_mode->htotal == drm_mode->htotal &&
5898 			native_mode->vtotal == drm_mode->vtotal) {
5899 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5900 	} else {
5901 		/* no scaling nor amdgpu inserted, no need to patch */
5902 	}
5903 }
5904 
5905 static struct dc_sink *
5906 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5907 {
5908 	struct dc_sink_init_data sink_init_data = { 0 };
5909 	struct dc_sink *sink = NULL;
5910 	sink_init_data.link = aconnector->dc_link;
5911 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5912 
5913 	sink = dc_sink_create(&sink_init_data);
5914 	if (!sink) {
5915 		DRM_ERROR("Failed to create sink!\n");
5916 		return NULL;
5917 	}
5918 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5919 
5920 	return sink;
5921 }
5922 
5923 static void set_multisync_trigger_params(
5924 		struct dc_stream_state *stream)
5925 {
5926 	struct dc_stream_state *master = NULL;
5927 
5928 	if (stream->triggered_crtc_reset.enabled) {
5929 		master = stream->triggered_crtc_reset.event_source;
5930 		stream->triggered_crtc_reset.event =
5931 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5932 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5933 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5934 	}
5935 }
5936 
5937 static void set_master_stream(struct dc_stream_state *stream_set[],
5938 			      int stream_count)
5939 {
5940 	int j, highest_rfr = 0, master_stream = 0;
5941 
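	/* Choose the stream with the highest refresh rate as the reset event source. */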
5942 	for (j = 0;  j < stream_count; j++) {
5943 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5944 			int refresh_rate = 0;
5945 
5946 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5947 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5948 			if (refresh_rate > highest_rfr) {
5949 				highest_rfr = refresh_rate;
5950 				master_stream = j;
5951 			}
5952 		}
5953 	}
5954 	for (j = 0;  j < stream_count; j++) {
5955 		if (stream_set[j])
5956 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5957 	}
5958 }
5959 
5960 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5961 {
5962 	int i = 0;
5963 	struct dc_stream_state *stream;
5964 
5965 	if (context->stream_count < 2)
5966 		return;
5967 	for (i = 0; i < context->stream_count ; i++) {
5968 		if (!context->streams[i])
5969 			continue;
5970 		/*
5971 		 * TODO: add a function to read AMD VSDB bits and set
5972 		 * crtc_sync_master.multi_sync_enabled flag
5973 		 * For now it's set to false
5974 		 */
5975 	}
5976 
5977 	set_master_stream(context->streams, context->stream_count);
5978 
5979 	for (i = 0; i < context->stream_count ; i++) {
5980 		stream = context->streams[i];
5981 
5982 		if (!stream)
5983 			continue;
5984 
5985 		set_multisync_trigger_params(stream);
5986 	}
5987 }
5988 
5989 #if defined(CONFIG_DRM_AMD_DC_DCN)
5990 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5991 							struct dc_sink *sink, struct dc_stream_state *stream,
5992 							struct dsc_dec_dpcd_caps *dsc_caps)
5993 {
5994 	stream->timing.flags.DSC = 0;
5995 
5996 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5997 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5998 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5999 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6000 				      dsc_caps);
6001 	}
6002 }
6003 
6004 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6005 										struct dc_sink *sink, struct dc_stream_state *stream,
6006 										struct dsc_dec_dpcd_caps *dsc_caps)
6007 {
6008 	struct drm_connector *drm_connector = &aconnector->base;
6009 	uint32_t link_bandwidth_kbps;
6010 	uint32_t max_dsc_target_bpp_limit_override = 0;
6011 
6012 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6013 							dc_link_get_link_cap(aconnector->dc_link));
6014 
6015 	if (stream->link && stream->link->local_sink)
6016 		max_dsc_target_bpp_limit_override =
6017 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6018 
6019 	/* Set DSC policy according to dsc_clock_en */
6020 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6021 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6022 
6023 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
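	/*
	 * For SST DP sinks, try to compute a DSC config that fits the stream
	 * within the link bandwidth; enable DSC only if one is found.
	 */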
6024 
6025 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6026 						dsc_caps,
6027 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6028 						max_dsc_target_bpp_limit_override,
6029 						link_bandwidth_kbps,
6030 						&stream->timing,
6031 						&stream->timing.dsc_cfg)) {
6032 			stream->timing.flags.DSC = 1;
6033 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6034 		}
6035 	}
6036 
6037 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6038 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6039 		stream->timing.flags.DSC = 1;
6040 
6041 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6042 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6043 
6044 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6045 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6046 
6047 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6048 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6049 }
6050 #endif
6051 
6052 /**
6053  * DOC: FreeSync Video
6054  *
6055  * When a userspace application wants to play a video, the content follows a
6056  * standard format definition that usually specifies the FPS for that format.
6057  * The below list illustrates some video format and the expected FPS,
6058  * respectively:
6059  *
6060  * - TV/NTSC (23.976 FPS)
6061  * - Cinema (24 FPS)
6062  * - TV/PAL (25 FPS)
6063  * - TV/NTSC (29.97 FPS)
6064  * - TV/NTSC (30 FPS)
6065  * - Cinema HFR (48 FPS)
6066  * - TV/PAL (50 FPS)
6067  * - Commonly used (60 FPS)
6068  * - Multiples of 24 (48,72,96,120 FPS)
6069  *
6070  * The list of standard video formats is not huge and can be added to the
6071  * connector modeset list beforehand. With that, userspace can leverage
6072  * FreeSync to extend the front porch in order to attain the target refresh
6073  * rate. Such a switch will happen seamlessly, without screen blanking or
6074  * reprogramming of the output in any other way. If the userspace requests a
6075  * modesetting change compatible with FreeSync modes that only differ in the
6076  * refresh rate, DC will skip the full update and avoid blink during the
6077  * transition. For example, the video player can change the modesetting from
6078  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6079  * causing any display blink. This same concept can be applied to a mode
6080  * setting change.
6081  */
6082 static struct drm_display_mode *
6083 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6084 			  bool use_probed_modes)
6085 {
6086 	struct drm_display_mode *m, *m_pref = NULL;
6087 	u16 current_refresh, highest_refresh;
6088 	struct list_head *list_head = use_probed_modes ?
6089 						    &aconnector->base.probed_modes :
6090 						    &aconnector->base.modes;
6091 
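	/* Reuse the cached base mode if it has already been computed. */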
6092 	if (aconnector->freesync_vid_base.clock != 0)
6093 		return &aconnector->freesync_vid_base;
6094 
6095 	/* Find the preferred mode */
6096 	list_for_each_entry (m, list_head, head) {
6097 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6098 			m_pref = m;
6099 			break;
6100 		}
6101 	}
6102 
6103 	if (!m_pref) {
6104 		/* Probably an EDID with no preferred mode; fall back to the first entry */
6105 		m_pref = list_first_entry_or_null(
6106 			&aconnector->base.modes, struct drm_display_mode, head);
6107 		if (!m_pref) {
6108 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6109 			return NULL;
6110 		}
6111 	}
6112 
6113 	highest_refresh = drm_mode_vrefresh(m_pref);
6114 
6115 	/*
6116 	 * Find the mode with highest refresh rate with same resolution.
6117 	 * For some monitors, preferred mode is not the mode with highest
6118 	 * supported refresh rate.
6119 	 */
6120 	list_for_each_entry (m, list_head, head) {
6121 		current_refresh  = drm_mode_vrefresh(m);
6122 
6123 		if (m->hdisplay == m_pref->hdisplay &&
6124 		    m->vdisplay == m_pref->vdisplay &&
6125 		    highest_refresh < current_refresh) {
6126 			highest_refresh = current_refresh;
6127 			m_pref = m;
6128 		}
6129 	}
6130 
6131 	aconnector->freesync_vid_base = *m_pref;
6132 	return m_pref;
6133 }
6134 
6135 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6136 				   struct amdgpu_dm_connector *aconnector)
6137 {
6138 	struct drm_display_mode *high_mode;
6139 	int timing_diff;
6140 
6141 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6142 	if (!high_mode || !mode)
6143 		return false;
6144 
6145 	timing_diff = high_mode->vtotal - mode->vtotal;
6146 
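	/*
	 * A FreeSync video mode must match the base mode in everything except
	 * the vertical blanking stretch expressed by timing_diff.
	 */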
6147 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6148 	    high_mode->hdisplay != mode->hdisplay ||
6149 	    high_mode->vdisplay != mode->vdisplay ||
6150 	    high_mode->hsync_start != mode->hsync_start ||
6151 	    high_mode->hsync_end != mode->hsync_end ||
6152 	    high_mode->htotal != mode->htotal ||
6153 	    high_mode->hskew != mode->hskew ||
6154 	    high_mode->vscan != mode->vscan ||
6155 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6156 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6157 		return false;
6158 	else
6159 		return true;
6160 }
6161 
6162 static struct dc_stream_state *
6163 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6164 		       const struct drm_display_mode *drm_mode,
6165 		       const struct dm_connector_state *dm_state,
6166 		       const struct dc_stream_state *old_stream,
6167 		       int requested_bpc)
6168 {
6169 	struct drm_display_mode *preferred_mode = NULL;
6170 	struct drm_connector *drm_connector;
6171 	const struct drm_connector_state *con_state =
6172 		dm_state ? &dm_state->base : NULL;
6173 	struct dc_stream_state *stream = NULL;
6174 	struct drm_display_mode mode = *drm_mode;
6175 	struct drm_display_mode saved_mode;
6176 	struct drm_display_mode *freesync_mode = NULL;
6177 	bool native_mode_found = false;
6178 	bool recalculate_timing = false;
6179 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6180 	int mode_refresh;
6181 	int preferred_refresh = 0;
6182 #if defined(CONFIG_DRM_AMD_DC_DCN)
6183 	struct dsc_dec_dpcd_caps dsc_caps;
6184 #endif
6185 	struct dc_sink *sink = NULL;
6186 
6187 	memset(&saved_mode, 0, sizeof(saved_mode));
6188 
6189 	if (aconnector == NULL) {
6190 		DRM_ERROR("aconnector is NULL!\n");
6191 		return stream;
6192 	}
6193 
6194 	drm_connector = &aconnector->base;
6195 
6196 	if (!aconnector->dc_sink) {
6197 		sink = create_fake_sink(aconnector);
6198 		if (!sink)
6199 			return stream;
6200 	} else {
6201 		sink = aconnector->dc_sink;
6202 		dc_sink_retain(sink);
6203 	}
6204 
6205 	stream = dc_create_stream_for_sink(sink);
6206 
6207 	if (stream == NULL) {
6208 		DRM_ERROR("Failed to create stream for sink!\n");
6209 		goto finish;
6210 	}
6211 
6212 	stream->dm_stream_context = aconnector;
6213 
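	/* Scramble at low (<= 340 Mcsc) rates when the sink's SCDC advertises support. */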
6214 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6215 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6216 
6217 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6218 		/* Search for preferred mode */
6219 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6220 			native_mode_found = true;
6221 			break;
6222 		}
6223 	}
6224 	if (!native_mode_found)
6225 		preferred_mode = list_first_entry_or_null(
6226 				&aconnector->base.modes,
6227 				struct drm_display_mode,
6228 				head);
6229 
6230 	mode_refresh = drm_mode_vrefresh(&mode);
6231 
6232 	if (preferred_mode == NULL) {
6233 		/*
6234 		 * This may not be an error, the use case is when we have no
6235 		 * usermode calls to reset and set mode upon hotplug. In this
6236 		 * case, we call set mode ourselves to restore the previous mode
6237 		 * and the mode list may not be populated in time.
6238 		 */
6239 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6240 	} else {
6241 		recalculate_timing = amdgpu_freesync_vid_mode &&
6242 				 is_freesync_video_mode(&mode, aconnector);
6243 		if (recalculate_timing) {
6244 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6245 			saved_mode = mode;
6246 			mode = *freesync_mode;
6247 		} else {
6248 			decide_crtc_timing_for_drm_display_mode(
6249 				&mode, preferred_mode, scale);
6250 
6251 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6252 		}
6253 	}
6254 
6255 	if (recalculate_timing)
6256 		drm_mode_set_crtcinfo(&saved_mode, 0);
6257 	else if (!dm_state)
6258 		drm_mode_set_crtcinfo(&mode, 0);
6259 
6260 	/*
6261 	 * If scaling is enabled and the refresh rate didn't change,
6262 	 * copy the VIC and sync polarities from the old timings.
6263 	 */
6264 	if (!scale || mode_refresh != preferred_refresh)
6265 		fill_stream_properties_from_drm_display_mode(
6266 			stream, &mode, &aconnector->base, con_state, NULL,
6267 			requested_bpc);
6268 	else
6269 		fill_stream_properties_from_drm_display_mode(
6270 			stream, &mode, &aconnector->base, con_state, old_stream,
6271 			requested_bpc);
6272 
6273 #if defined(CONFIG_DRM_AMD_DC_DCN)
6274 	/* SST DSC determination policy */
6275 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6276 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6277 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6278 #endif
6279 
6280 	update_stream_scaling_settings(&mode, dm_state, stream);
6281 
6282 	fill_audio_info(
6283 		&stream->audio_info,
6284 		drm_connector,
6285 		sink);
6286 
6287 	update_stream_signal(stream, sink);
6288 
6289 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6290 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6291 
6292 	if (stream->link->psr_settings.psr_feature_enabled) {
6293 		/*
6294 		 * Decide whether the stream supports VSC SDP colorimetry
6295 		 * before building the VSC info packet.
6296 		 */
6297 		stream->use_vsc_sdp_for_colorimetry = false;
6298 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6299 			stream->use_vsc_sdp_for_colorimetry =
6300 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6301 		} else {
6302 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6303 				stream->use_vsc_sdp_for_colorimetry = true;
6304 		}
6305 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6306 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6307 
6308 	}
6309 finish:
6310 	dc_sink_release(sink);
6311 
6312 	return stream;
6313 }
6314 
6315 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6316 {
6317 	drm_crtc_cleanup(crtc);
6318 	kfree(crtc);
6319 }
6320 
6321 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6322 				  struct drm_crtc_state *state)
6323 {
6324 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6325 
6326 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6327 	if (cur->stream)
6328 		dc_stream_release(cur->stream);
6329 
6330 
6331 	__drm_atomic_helper_crtc_destroy_state(state);
6332 
6333 
6334 	kfree(state);
6335 }
6336 
6337 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6338 {
6339 	struct dm_crtc_state *state;
6340 
6341 	if (crtc->state)
6342 		dm_crtc_destroy_state(crtc, crtc->state);
6343 
6344 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6345 	if (WARN_ON(!state))
6346 		return;
6347 
6348 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6349 }
6350 
6351 static struct drm_crtc_state *
6352 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6353 {
6354 	struct dm_crtc_state *state, *cur;
6355 
6356 	cur = to_dm_crtc_state(crtc->state);
6357 
6358 	if (WARN_ON(!crtc->state))
6359 		return NULL;
6360 
6361 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6362 	if (!state)
6363 		return NULL;
6364 
6365 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6366 
6367 	if (cur->stream) {
6368 		state->stream = cur->stream;
6369 		dc_stream_retain(state->stream);
6370 	}
6371 
6372 	state->active_planes = cur->active_planes;
6373 	state->vrr_infopacket = cur->vrr_infopacket;
6374 	state->abm_level = cur->abm_level;
6375 	state->vrr_supported = cur->vrr_supported;
6376 	state->freesync_config = cur->freesync_config;
6377 	state->cm_has_degamma = cur->cm_has_degamma;
6378 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6379 	state->force_dpms_off = cur->force_dpms_off;
6380 	/* TODO: Duplicate dc_stream once the stream object is flattened */
6381 
6382 	return &state->base;
6383 }
6384 
6385 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6386 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6387 {
6388 	crtc_debugfs_init(crtc);
6389 
6390 	return 0;
6391 }
6392 #endif
6393 
6394 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6395 {
6396 	enum dc_irq_source irq_source;
6397 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6398 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6399 	int rc;
6400 
6401 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6402 
6403 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6404 
6405 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6406 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6407 	return rc;
6408 }
6409 
6410 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6411 {
6412 	enum dc_irq_source irq_source;
6413 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6414 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6415 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6416 #if defined(CONFIG_DRM_AMD_DC_DCN)
6417 	struct amdgpu_display_manager *dm = &adev->dm;
6418 	struct vblank_control_work *work;
6419 #endif
6420 	int rc = 0;
6421 
6422 	if (enable) {
6423 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6424 		if (amdgpu_dm_vrr_active(acrtc_state))
6425 			rc = dm_set_vupdate_irq(crtc, true);
6426 	} else {
6427 		/* vblank irq off -> vupdate irq off */
6428 		rc = dm_set_vupdate_irq(crtc, false);
6429 	}
6430 
6431 	if (rc)
6432 		return rc;
6433 
6434 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6435 
6436 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6437 		return -EBUSY;
6438 
6439 	if (amdgpu_in_reset(adev))
6440 		return 0;
6441 
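	/*
	 * Hand the rest of the enable/disable handling to the vblank control
	 * worker; GFP_ATOMIC is used since this path may run in atomic context.
	 */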
6442 #if defined(CONFIG_DRM_AMD_DC_DCN)
6443 	if (dm->vblank_control_workqueue) {
6444 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6445 		if (!work)
6446 			return -ENOMEM;
6447 
6448 		INIT_WORK(&work->work, vblank_control_worker);
6449 		work->dm = dm;
6450 		work->acrtc = acrtc;
6451 		work->enable = enable;
6452 
6453 		if (acrtc_state->stream) {
6454 			dc_stream_retain(acrtc_state->stream);
6455 			work->stream = acrtc_state->stream;
6456 		}
6457 
6458 		queue_work(dm->vblank_control_workqueue, &work->work);
6459 	}
6460 #endif
6461 
6462 	return 0;
6463 }
6464 
6465 static int dm_enable_vblank(struct drm_crtc *crtc)
6466 {
6467 	return dm_set_vblank(crtc, true);
6468 }
6469 
6470 static void dm_disable_vblank(struct drm_crtc *crtc)
6471 {
6472 	dm_set_vblank(crtc, false);
6473 }
6474 
6475 /* Only the options currently available to the driver are implemented */
6476 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6477 	.reset = dm_crtc_reset_state,
6478 	.destroy = amdgpu_dm_crtc_destroy,
6479 	.set_config = drm_atomic_helper_set_config,
6480 	.page_flip = drm_atomic_helper_page_flip,
6481 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6482 	.atomic_destroy_state = dm_crtc_destroy_state,
6483 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6484 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6485 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6486 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6487 	.enable_vblank = dm_enable_vblank,
6488 	.disable_vblank = dm_disable_vblank,
6489 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6490 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6491 	.late_register = amdgpu_dm_crtc_late_register,
6492 #endif
6493 };
6494 
6495 static enum drm_connector_status
6496 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6497 {
6498 	bool connected;
6499 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6500 
6501 	/*
6502 	 * Notes:
6503 	 * 1. This interface is NOT called in context of HPD irq.
6504 	 * 2. This interface *is called* in the context of a user-mode ioctl,
6505 	 * which makes it a bad place for *any* MST-related activity.
6506 	 */
6507 
6508 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6509 	    !aconnector->fake_enable)
6510 		connected = (aconnector->dc_sink != NULL);
6511 	else
6512 		connected = (aconnector->base.force == DRM_FORCE_ON);
6513 
6514 	update_subconnector_property(aconnector);
6515 
6516 	return (connected ? connector_status_connected :
6517 			connector_status_disconnected);
6518 }
6519 
6520 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6521 					    struct drm_connector_state *connector_state,
6522 					    struct drm_property *property,
6523 					    uint64_t val)
6524 {
6525 	struct drm_device *dev = connector->dev;
6526 	struct amdgpu_device *adev = drm_to_adev(dev);
6527 	struct dm_connector_state *dm_old_state =
6528 		to_dm_connector_state(connector->state);
6529 	struct dm_connector_state *dm_new_state =
6530 		to_dm_connector_state(connector_state);
6531 
6532 	int ret = -EINVAL;
6533 
6534 	if (property == dev->mode_config.scaling_mode_property) {
6535 		enum amdgpu_rmx_type rmx_type;
6536 
6537 		switch (val) {
6538 		case DRM_MODE_SCALE_CENTER:
6539 			rmx_type = RMX_CENTER;
6540 			break;
6541 		case DRM_MODE_SCALE_ASPECT:
6542 			rmx_type = RMX_ASPECT;
6543 			break;
6544 		case DRM_MODE_SCALE_FULLSCREEN:
6545 			rmx_type = RMX_FULL;
6546 			break;
6547 		case DRM_MODE_SCALE_NONE:
6548 		default:
6549 			rmx_type = RMX_OFF;
6550 			break;
6551 		}
6552 
6553 		if (dm_old_state->scaling == rmx_type)
6554 			return 0;
6555 
6556 		dm_new_state->scaling = rmx_type;
6557 		ret = 0;
6558 	} else if (property == adev->mode_info.underscan_hborder_property) {
6559 		dm_new_state->underscan_hborder = val;
6560 		ret = 0;
6561 	} else if (property == adev->mode_info.underscan_vborder_property) {
6562 		dm_new_state->underscan_vborder = val;
6563 		ret = 0;
6564 	} else if (property == adev->mode_info.underscan_property) {
6565 		dm_new_state->underscan_enable = val;
6566 		ret = 0;
6567 	} else if (property == adev->mode_info.abm_level_property) {
6568 		dm_new_state->abm_level = val;
6569 		ret = 0;
6570 	}
6571 
6572 	return ret;
6573 }
6574 
6575 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6576 					    const struct drm_connector_state *state,
6577 					    struct drm_property *property,
6578 					    uint64_t *val)
6579 {
6580 	struct drm_device *dev = connector->dev;
6581 	struct amdgpu_device *adev = drm_to_adev(dev);
6582 	struct dm_connector_state *dm_state =
6583 		to_dm_connector_state(state);
6584 	int ret = -EINVAL;
6585 
6586 	if (property == dev->mode_config.scaling_mode_property) {
6587 		switch (dm_state->scaling) {
6588 		case RMX_CENTER:
6589 			*val = DRM_MODE_SCALE_CENTER;
6590 			break;
6591 		case RMX_ASPECT:
6592 			*val = DRM_MODE_SCALE_ASPECT;
6593 			break;
6594 		case RMX_FULL:
6595 			*val = DRM_MODE_SCALE_FULLSCREEN;
6596 			break;
6597 		case RMX_OFF:
6598 		default:
6599 			*val = DRM_MODE_SCALE_NONE;
6600 			break;
6601 		}
6602 		ret = 0;
6603 	} else if (property == adev->mode_info.underscan_hborder_property) {
6604 		*val = dm_state->underscan_hborder;
6605 		ret = 0;
6606 	} else if (property == adev->mode_info.underscan_vborder_property) {
6607 		*val = dm_state->underscan_vborder;
6608 		ret = 0;
6609 	} else if (property == adev->mode_info.underscan_property) {
6610 		*val = dm_state->underscan_enable;
6611 		ret = 0;
6612 	} else if (property == adev->mode_info.abm_level_property) {
6613 		*val = dm_state->abm_level;
6614 		ret = 0;
6615 	}
6616 
6617 	return ret;
6618 }
6619 
6620 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6621 {
6622 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6623 
6624 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6625 }
6626 
6627 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6628 {
6629 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6630 	const struct dc_link *link = aconnector->dc_link;
6631 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6632 	struct amdgpu_display_manager *dm = &adev->dm;
6633 	int i;
6634 
6635 	/*
6636 	 * Call only if mst_mgr was initialized before, since it's not done
6637 	 * for all connector types.
6638 	 */
6639 	if (aconnector->mst_mgr.dev)
6640 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6641 
6642 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6643 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6644 	for (i = 0; i < dm->num_of_edps; i++) {
6645 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6646 			backlight_device_unregister(dm->backlight_dev[i]);
6647 			dm->backlight_dev[i] = NULL;
6648 		}
6649 	}
6650 #endif
6651 
6652 	if (aconnector->dc_em_sink)
6653 		dc_sink_release(aconnector->dc_em_sink);
6654 	aconnector->dc_em_sink = NULL;
6655 	if (aconnector->dc_sink)
6656 		dc_sink_release(aconnector->dc_sink);
6657 	aconnector->dc_sink = NULL;
6658 
6659 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6660 	drm_connector_unregister(connector);
6661 	drm_connector_cleanup(connector);
6662 	if (aconnector->i2c) {
6663 		i2c_del_adapter(&aconnector->i2c->base);
6664 		kfree(aconnector->i2c);
6665 	}
6666 	kfree(aconnector->dm_dp_aux.aux.name);
6667 
6668 	kfree(connector);
6669 }
6670 
6671 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6672 {
6673 	struct dm_connector_state *state =
6674 		to_dm_connector_state(connector->state);
6675 
6676 	if (connector->state)
6677 		__drm_atomic_helper_connector_destroy_state(connector->state);
6678 
6679 	kfree(state);
6680 
6681 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6682 
6683 	if (state) {
6684 		state->scaling = RMX_OFF;
6685 		state->underscan_enable = false;
6686 		state->underscan_hborder = 0;
6687 		state->underscan_vborder = 0;
6688 		state->base.max_requested_bpc = 8;
6689 		state->vcpi_slots = 0;
6690 		state->pbn = 0;
6691 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6692 			state->abm_level = amdgpu_dm_abm_level;
6693 
6694 		__drm_atomic_helper_connector_reset(connector, &state->base);
6695 	}
6696 }
6697 
6698 struct drm_connector_state *
6699 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6700 {
6701 	struct dm_connector_state *state =
6702 		to_dm_connector_state(connector->state);
6703 
6704 	struct dm_connector_state *new_state =
6705 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6706 
6707 	if (!new_state)
6708 		return NULL;
6709 
6710 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6711 
6712 	new_state->freesync_capable = state->freesync_capable;
6713 	new_state->abm_level = state->abm_level;
6714 	new_state->scaling = state->scaling;
6715 	new_state->underscan_enable = state->underscan_enable;
6716 	new_state->underscan_hborder = state->underscan_hborder;
6717 	new_state->underscan_vborder = state->underscan_vborder;
6718 	new_state->vcpi_slots = state->vcpi_slots;
6719 	new_state->pbn = state->pbn;
6720 	return &new_state->base;
6721 }
6722 
6723 static int
6724 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6725 {
6726 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6727 		to_amdgpu_dm_connector(connector);
6728 	int r;
6729 
6730 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6731 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6732 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6733 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6734 		if (r)
6735 			return r;
6736 	}
6737 
6738 #if defined(CONFIG_DEBUG_FS)
6739 	connector_debugfs_init(amdgpu_dm_connector);
6740 #endif
6741 
6742 	return 0;
6743 }
6744 
6745 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6746 	.reset = amdgpu_dm_connector_funcs_reset,
6747 	.detect = amdgpu_dm_connector_detect,
6748 	.fill_modes = drm_helper_probe_single_connector_modes,
6749 	.destroy = amdgpu_dm_connector_destroy,
6750 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6751 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6752 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6753 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6754 	.late_register = amdgpu_dm_connector_late_register,
6755 	.early_unregister = amdgpu_dm_connector_unregister
6756 };
6757 
6758 static int get_modes(struct drm_connector *connector)
6759 {
6760 	return amdgpu_dm_connector_get_modes(connector);
6761 }
6762 
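/*
 * Create an emulated (virtual) sink from the connector's EDID blob and add it
 * to the dc_link as a remote sink, so a forced-on connector has something to
 * validate modes against even without a physically detected display.
 */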
6763 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6764 {
6765 	struct dc_sink_init_data init_params = {
6766 			.link = aconnector->dc_link,
6767 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6768 	};
6769 	struct edid *edid;
6770 
6771 	if (!aconnector->base.edid_blob_ptr) {
6772 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6773 				aconnector->base.name);
6774 
6775 		aconnector->base.force = DRM_FORCE_OFF;
6776 		aconnector->base.override_edid = false;
6777 		return;
6778 	}
6779 
6780 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6781 
6782 	aconnector->edid = edid;
6783 
6784 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6785 		aconnector->dc_link,
6786 		(uint8_t *)edid,
6787 		(edid->extensions + 1) * EDID_LENGTH,
6788 		&init_params);
6789 
6790 	if (aconnector->base.force == DRM_FORCE_ON) {
6791 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6792 		aconnector->dc_link->local_sink :
6793 		aconnector->dc_em_sink;
6794 		dc_sink_retain(aconnector->dc_sink);
6795 	}
6796 }
6797 
6798 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6799 {
6800 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6801 
6802 	/*
6803 	 * In case of a headless boot with force on for a DP managed connector,
6804 	 * these settings have to be != 0 to get an initial modeset.
6805 	 */
6806 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6807 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6808 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6809 	}
6810 
6811 
6812 	aconnector->base.override_edid = true;
6813 	create_eml_sink(aconnector);
6814 }
6815 
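/*
 * Build a dc_stream_state for the requested mode and validate it with DC.  On
 * validation failure, retry with progressively lower bpc (down to 6) and, if
 * the encoder still rejects the stream, retry once with forced YCbCr420 output.
 */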
6816 static struct dc_stream_state *
6817 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6818 				const struct drm_display_mode *drm_mode,
6819 				const struct dm_connector_state *dm_state,
6820 				const struct dc_stream_state *old_stream)
6821 {
6822 	struct drm_connector *connector = &aconnector->base;
6823 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6824 	struct dc_stream_state *stream;
6825 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6826 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6827 	enum dc_status dc_result = DC_OK;
6828 
6829 	do {
6830 		stream = create_stream_for_sink(aconnector, drm_mode,
6831 						dm_state, old_stream,
6832 						requested_bpc);
6833 		if (stream == NULL) {
6834 			DRM_ERROR("Failed to create stream for sink!\n");
6835 			break;
6836 		}
6837 
6838 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6839 
6840 		if (dc_result != DC_OK) {
6841 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6842 				      drm_mode->hdisplay,
6843 				      drm_mode->vdisplay,
6844 				      drm_mode->clock,
6845 				      dc_result,
6846 				      dc_status_to_str(dc_result));
6847 
6848 			dc_stream_release(stream);
6849 			stream = NULL;
6850 			requested_bpc -= 2; /* lower bpc to retry validation */
6851 		}
6852 
6853 	} while (stream == NULL && requested_bpc >= 6);
6854 
6855 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6856 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6857 
6858 		aconnector->force_yuv420_output = true;
6859 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6860 						dm_state, old_stream);
6861 		aconnector->force_yuv420_output = false;
6862 	}
6863 
6864 	return stream;
6865 }
6866 
6867 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6868 				   struct drm_display_mode *mode)
6869 {
6870 	int result = MODE_ERROR;
6871 	struct dc_sink *dc_sink;
6872 	/* TODO: Unhardcode stream count */
6873 	struct dc_stream_state *stream;
6874 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6875 
6876 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6877 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6878 		return result;
6879 
6880 	/*
6881 	 * Only run this the first time mode_valid is called to initialize
6882 	 * EDID mgmt
6883 	 */
6884 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6885 		!aconnector->dc_em_sink)
6886 		handle_edid_mgmt(aconnector);
6887 
6888 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6889 
6890 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6891 				aconnector->base.force != DRM_FORCE_ON) {
6892 		DRM_ERROR("dc_sink is NULL!\n");
6893 		goto fail;
6894 	}
6895 
6896 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6897 	if (stream) {
6898 		dc_stream_release(stream);
6899 		result = MODE_OK;
6900 	}
6901 
6902 fail:
6903 	/* TODO: error handling */
6904 	return result;
6905 }
6906 
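/*
 * Pack the HDR static metadata from the connector state into a DC info
 * packet: an HDMI DRM infoframe for HDMI sinks, or the equivalent SDP
 * layout for DP/eDP sinks.
 */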
6907 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6908 				struct dc_info_packet *out)
6909 {
6910 	struct hdmi_drm_infoframe frame;
6911 	unsigned char buf[30]; /* 26 + 4 */
6912 	ssize_t len;
6913 	int ret, i;
6914 
6915 	memset(out, 0, sizeof(*out));
6916 
6917 	if (!state->hdr_output_metadata)
6918 		return 0;
6919 
6920 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6921 	if (ret)
6922 		return ret;
6923 
6924 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6925 	if (len < 0)
6926 		return (int)len;
6927 
6928 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6929 	if (len != 30)
6930 		return -EINVAL;
6931 
6932 	/* Prepare the infopacket for DC. */
6933 	switch (state->connector->connector_type) {
6934 	case DRM_MODE_CONNECTOR_HDMIA:
6935 		out->hb0 = 0x87; /* type */
6936 		out->hb1 = 0x01; /* version */
6937 		out->hb2 = 0x1A; /* length */
6938 		out->sb[0] = buf[3]; /* checksum */
6939 		i = 1;
6940 		break;
6941 
6942 	case DRM_MODE_CONNECTOR_DisplayPort:
6943 	case DRM_MODE_CONNECTOR_eDP:
6944 		out->hb0 = 0x00; /* sdp id, zero */
6945 		out->hb1 = 0x87; /* type */
6946 		out->hb2 = 0x1D; /* payload len - 1 */
6947 		out->hb3 = (0x13 << 2); /* sdp version */
6948 		out->sb[0] = 0x01; /* version */
6949 		out->sb[1] = 0x1A; /* length */
6950 		i = 2;
6951 		break;
6952 
6953 	default:
6954 		return -EINVAL;
6955 	}
6956 
6957 	memcpy(&out->sb[i], &buf[4], 26);
6958 	out->valid = true;
6959 
6960 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6961 		       sizeof(out->sb), false);
6962 
6963 	return 0;
6964 }
6965 
6966 static int
6967 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6968 				 struct drm_atomic_state *state)
6969 {
6970 	struct drm_connector_state *new_con_state =
6971 		drm_atomic_get_new_connector_state(state, conn);
6972 	struct drm_connector_state *old_con_state =
6973 		drm_atomic_get_old_connector_state(state, conn);
6974 	struct drm_crtc *crtc = new_con_state->crtc;
6975 	struct drm_crtc_state *new_crtc_state;
6976 	int ret;
6977 
6978 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6979 
6980 	if (!crtc)
6981 		return 0;
6982 
6983 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6984 		struct dc_info_packet hdr_infopacket;
6985 
6986 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6987 		if (ret)
6988 			return ret;
6989 
6990 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6991 		if (IS_ERR(new_crtc_state))
6992 			return PTR_ERR(new_crtc_state);
6993 
6994 		/*
6995 		 * DC considers the stream backends changed if the
6996 		 * static metadata changes. Forcing the modeset also
6997 		 * gives a simple way for userspace to switch from
6998 		 * 8bpc to 10bpc when setting the metadata to enter
6999 		 * or exit HDR.
7000 		 *
7001 		 * Changing the static metadata after it's been
7002 		 * set is permissible, however. So only force a
7003 		 * modeset if we're entering or exiting HDR.
7004 		 */
7005 		new_crtc_state->mode_changed =
7006 			!old_con_state->hdr_output_metadata ||
7007 			!new_con_state->hdr_output_metadata;
7008 	}
7009 
7010 	return 0;
7011 }
7012 
7013 static const struct drm_connector_helper_funcs
7014 amdgpu_dm_connector_helper_funcs = {
7015 	/*
7016 	 * If hotplugging a second, bigger display in FB Con mode, bigger resolution
7017 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7018 	 * are missing after the user starts lightdm. So we need to renew the modes
7019 	 * list in the get_modes callback, not just return the modes count.
7020 	 */
7021 	.get_modes = get_modes,
7022 	.mode_valid = amdgpu_dm_connector_mode_valid,
7023 	.atomic_check = amdgpu_dm_connector_atomic_check,
7024 };
7025 
7026 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7027 {
7028 }
7029 
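/*
 * Count the non-cursor planes that will be enabled on the CRTC.  Planes with
 * no new state in this commit keep their previously validated (enabled)
 * configuration and are counted as active.
 */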
7030 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7031 {
7032 	struct drm_atomic_state *state = new_crtc_state->state;
7033 	struct drm_plane *plane;
7034 	int num_active = 0;
7035 
7036 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7037 		struct drm_plane_state *new_plane_state;
7038 
7039 		/* Cursor planes are "fake". */
7040 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7041 			continue;
7042 
7043 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7044 
7045 		if (!new_plane_state) {
7046 			/*
7047 			 * The plane is enabled on the CRTC and hasn't changed
7048 			 * state. This means that it previously passed
7049 			 * validation and is therefore enabled.
7050 			 */
7051 			num_active += 1;
7052 			continue;
7053 		}
7054 
7055 		/* We need a framebuffer to be considered enabled. */
7056 		num_active += (new_plane_state->fb != NULL);
7057 	}
7058 
7059 	return num_active;
7060 }
7061 
7062 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7063 					 struct drm_crtc_state *new_crtc_state)
7064 {
7065 	struct dm_crtc_state *dm_new_crtc_state =
7066 		to_dm_crtc_state(new_crtc_state);
7067 
7068 	dm_new_crtc_state->active_planes = 0;
7069 
7070 	if (!dm_new_crtc_state->stream)
7071 		return;
7072 
7073 	dm_new_crtc_state->active_planes =
7074 		count_crtc_active_planes(new_crtc_state);
7075 }
7076 
7077 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7078 				       struct drm_atomic_state *state)
7079 {
7080 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7081 									  crtc);
7082 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7083 	struct dc *dc = adev->dm.dc;
7084 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7085 	int ret = -EINVAL;
7086 
7087 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7088 
7089 	dm_update_crtc_active_planes(crtc, crtc_state);
7090 
7091 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7092 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7093 		return ret;
7094 	}
7095 
7096 	/*
7097 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7098 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7099 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7100 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7101 	 */
7102 	if (crtc_state->enable &&
7103 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7104 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7105 		return -EINVAL;
7106 	}
7107 
7108 	/* In some use cases, like reset, no stream is attached */
7109 	if (!dm_crtc_state->stream)
7110 		return 0;
7111 
7112 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7113 		return 0;
7114 
7115 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7116 	return ret;
7117 }
7118 
7119 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7120 				      const struct drm_display_mode *mode,
7121 				      struct drm_display_mode *adjusted_mode)
7122 {
7123 	return true;
7124 }
7125 
7126 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7127 	.disable = dm_crtc_helper_disable,
7128 	.atomic_check = dm_crtc_helper_atomic_check,
7129 	.mode_fixup = dm_crtc_helper_mode_fixup,
7130 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7131 };
7132 
7133 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7134 {
7135 
7136 }
7137 
7138 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7139 {
7140 	switch (display_color_depth) {
7141 	case COLOR_DEPTH_666:
7142 		return 6;
7143 	case COLOR_DEPTH_888:
7144 		return 8;
7145 	case COLOR_DEPTH_101010:
7146 		return 10;
7147 	case COLOR_DEPTH_121212:
7148 		return 12;
7149 	case COLOR_DEPTH_141414:
7150 		return 14;
7151 	case COLOR_DEPTH_161616:
7152 		return 16;
7153 	default:
7154 		break;
7155 	}
7156 	return 0;
7157 }
7158 
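/*
 * For MST connectors, derive the required PBN from the adjusted mode and
 * color depth, then atomically reserve VCPI slots on the MST topology
 * manager.
 */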
7159 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7160 					  struct drm_crtc_state *crtc_state,
7161 					  struct drm_connector_state *conn_state)
7162 {
7163 	struct drm_atomic_state *state = crtc_state->state;
7164 	struct drm_connector *connector = conn_state->connector;
7165 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7166 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7167 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7168 	struct drm_dp_mst_topology_mgr *mst_mgr;
7169 	struct drm_dp_mst_port *mst_port;
7170 	enum dc_color_depth color_depth;
7171 	int clock, bpp = 0;
7172 	bool is_y420 = false;
7173 
7174 	if (!aconnector->port || !aconnector->dc_sink)
7175 		return 0;
7176 
7177 	mst_port = aconnector->port;
7178 	mst_mgr = &aconnector->mst_port->mst_mgr;
7179 
7180 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7181 		return 0;
7182 
7183 	if (!state->duplicated) {
7184 		int max_bpc = conn_state->max_requested_bpc;
7185 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7186 				aconnector->force_yuv420_output;
7187 		color_depth = convert_color_depth_from_display_info(connector,
7188 								    is_y420,
7189 								    max_bpc);
7190 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7191 		clock = adjusted_mode->clock;
7192 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7193 	}
7194 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7195 									   mst_mgr,
7196 									   mst_port,
7197 									   dm_new_connector_state->pbn,
7198 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7199 	if (dm_new_connector_state->vcpi_slots < 0) {
7200 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7201 		return dm_new_connector_state->vcpi_slots;
7202 	}
7203 	return 0;
7204 }
7205 
7206 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7207 	.disable = dm_encoder_helper_disable,
7208 	.atomic_check = dm_encoder_helper_atomic_check
7209 };
7210 
7211 #if defined(CONFIG_DRM_AMD_DC_DCN)
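/*
 * Walk the MST connectors in the atomic state and enable or disable DSC on
 * each port, reserving VCPI slots from the PBN values computed by
 * compute_mst_dsc_configs_for_state().
 */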
7212 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7213 					    struct dc_state *dc_state,
7214 					    struct dsc_mst_fairness_vars *vars)
7215 {
7216 	struct dc_stream_state *stream = NULL;
7217 	struct drm_connector *connector;
7218 	struct drm_connector_state *new_con_state;
7219 	struct amdgpu_dm_connector *aconnector;
7220 	struct dm_connector_state *dm_conn_state;
7221 	int i, j, clock;
7222 	int vcpi, pbn_div, pbn = 0;
7223 
7224 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7225 
7226 		aconnector = to_amdgpu_dm_connector(connector);
7227 
7228 		if (!aconnector->port)
7229 			continue;
7230 
7231 		if (!new_con_state || !new_con_state->crtc)
7232 			continue;
7233 
7234 		dm_conn_state = to_dm_connector_state(new_con_state);
7235 
7236 		for (j = 0; j < dc_state->stream_count; j++) {
7237 			stream = dc_state->streams[j];
7238 			if (!stream)
7239 				continue;
7240 
7241 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7242 				break;
7243 
7244 			stream = NULL;
7245 		}
7246 
7247 		if (!stream)
7248 			continue;
7249 
7250 		if (stream->timing.flags.DSC != 1) {
7251 			drm_dp_mst_atomic_enable_dsc(state,
7252 						     aconnector->port,
7253 						     dm_conn_state->pbn,
7254 						     0,
7255 						     false);
7256 			continue;
7257 		}
7258 
7259 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7260 		clock = stream->timing.pix_clk_100hz / 10;
7261 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7262 		for (j = 0; j < dc_state->stream_count; j++) {
7263 			if (vars[j].aconnector == aconnector) {
7264 				pbn = vars[j].pbn;
7265 				break;
7266 			}
7267 		}
7268 
7269 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7270 						    aconnector->port,
7271 						    pbn, pbn_div,
7272 						    true);
7273 		if (vcpi < 0)
7274 			return vcpi;
7275 
7276 		dm_conn_state->pbn = pbn;
7277 		dm_conn_state->vcpi_slots = vcpi;
7278 	}
7279 	return 0;
7280 }
7281 #endif
7282 
7283 static void dm_drm_plane_reset(struct drm_plane *plane)
7284 {
7285 	struct dm_plane_state *amdgpu_state = NULL;
7286 
7287 	if (plane->state)
7288 		plane->funcs->atomic_destroy_state(plane, plane->state);
7289 
7290 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7291 	WARN_ON(amdgpu_state == NULL);
7292 
7293 	if (amdgpu_state)
7294 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7295 }
7296 
7297 static struct drm_plane_state *
7298 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7299 {
7300 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7301 
7302 	old_dm_plane_state = to_dm_plane_state(plane->state);
7303 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7304 	if (!dm_plane_state)
7305 		return NULL;
7306 
7307 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7308 
7309 	if (old_dm_plane_state->dc_state) {
7310 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7311 		dc_plane_state_retain(dm_plane_state->dc_state);
7312 	}
7313 
7314 	return &dm_plane_state->base;
7315 }
7316 
7317 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7318 				struct drm_plane_state *state)
7319 {
7320 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7321 
7322 	if (dm_plane_state->dc_state)
7323 		dc_plane_state_release(dm_plane_state->dc_state);
7324 
7325 	drm_atomic_helper_plane_destroy_state(plane, state);
7326 }
7327 
7328 static const struct drm_plane_funcs dm_plane_funcs = {
7329 	.update_plane	= drm_atomic_helper_update_plane,
7330 	.disable_plane	= drm_atomic_helper_disable_plane,
7331 	.destroy	= drm_primary_helper_destroy,
7332 	.reset = dm_drm_plane_reset,
7333 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7334 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7335 	.format_mod_supported = dm_plane_format_mod_supported,
7336 };
7337 
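/*
 * Pin the framebuffer BO in a display-capable domain, map it into GART and
 * record its GPU address, then fill the DC plane buffer attributes for
 * newly created planes.
 */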
7338 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7339 				      struct drm_plane_state *new_state)
7340 {
7341 	struct amdgpu_framebuffer *afb;
7342 	struct drm_gem_object *obj;
7343 	struct amdgpu_device *adev;
7344 	struct amdgpu_bo *rbo;
7345 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7346 	struct list_head list;
7347 	struct ttm_validate_buffer tv;
7348 	struct ww_acquire_ctx ticket;
7349 	uint32_t domain;
7350 	int r;
7351 
7352 	if (!new_state->fb) {
7353 		DRM_DEBUG_KMS("No FB bound\n");
7354 		return 0;
7355 	}
7356 
7357 	afb = to_amdgpu_framebuffer(new_state->fb);
7358 	obj = new_state->fb->obj[0];
7359 	rbo = gem_to_amdgpu_bo(obj);
7360 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7361 	INIT_LIST_HEAD(&list);
7362 
7363 	tv.bo = &rbo->tbo;
7364 	tv.num_shared = 1;
7365 	list_add(&tv.head, &list);
7366 
7367 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7368 	if (r) {
7369 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7370 		return r;
7371 	}
7372 
7373 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7374 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7375 	else
7376 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7377 
7378 	r = amdgpu_bo_pin(rbo, domain);
7379 	if (unlikely(r != 0)) {
7380 		if (r != -ERESTARTSYS)
7381 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7382 		ttm_eu_backoff_reservation(&ticket, &list);
7383 		return r;
7384 	}
7385 
7386 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7387 	if (unlikely(r != 0)) {
7388 		amdgpu_bo_unpin(rbo);
7389 		ttm_eu_backoff_reservation(&ticket, &list);
7390 		DRM_ERROR("%p bind failed\n", rbo);
7391 		return r;
7392 	}
7393 
7394 	ttm_eu_backoff_reservation(&ticket, &list);
7395 
7396 	afb->address = amdgpu_bo_gpu_offset(rbo);
7397 
7398 	amdgpu_bo_ref(rbo);
7399 
7400 	/*
7401 	 * We don't do surface updates on planes that have been newly created,
7402 	 * but we also don't have the afb->address during atomic check.
7403 	 *
7404 	 * Fill in buffer attributes depending on the address here, but only on
7405 	 * newly created planes since they're not being used by DC yet and this
7406 	 * won't modify global state.
7407 	 */
7408 	dm_plane_state_old = to_dm_plane_state(plane->state);
7409 	dm_plane_state_new = to_dm_plane_state(new_state);
7410 
7411 	if (dm_plane_state_new->dc_state &&
7412 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7413 		struct dc_plane_state *plane_state =
7414 			dm_plane_state_new->dc_state;
7415 		bool force_disable_dcc = !plane_state->dcc.enable;
7416 
7417 		fill_plane_buffer_attributes(
7418 			adev, afb, plane_state->format, plane_state->rotation,
7419 			afb->tiling_flags,
7420 			&plane_state->tiling_info, &plane_state->plane_size,
7421 			&plane_state->dcc, &plane_state->address,
7422 			afb->tmz_surface, force_disable_dcc);
7423 	}
7424 
7425 	return 0;
7426 }
7427 
7428 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7429 				       struct drm_plane_state *old_state)
7430 {
7431 	struct amdgpu_bo *rbo;
7432 	int r;
7433 
7434 	if (!old_state->fb)
7435 		return;
7436 
7437 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7438 	r = amdgpu_bo_reserve(rbo, false);
7439 	if (unlikely(r)) {
7440 		DRM_ERROR("failed to reserve rbo before unpin\n");
7441 		return;
7442 	}
7443 
7444 	amdgpu_bo_unpin(rbo);
7445 	amdgpu_bo_unreserve(rbo);
7446 	amdgpu_bo_unref(&rbo);
7447 }
7448 
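/*
 * Validate the plane's viewport against the CRTC and translate the DC scaling
 * limits (1.0 == 1000, dst/src) into DRM's 16.16 src/dst convention before
 * handing off to drm_atomic_helper_check_plane_state().
 */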
7449 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7450 				       struct drm_crtc_state *new_crtc_state)
7451 {
7452 	struct drm_framebuffer *fb = state->fb;
7453 	int min_downscale, max_upscale;
7454 	int min_scale = 0;
7455 	int max_scale = INT_MAX;
7456 
7457 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7458 	if (fb && state->crtc) {
7459 		/* Validate viewport to cover the case when only the position changes */
7460 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7461 			int viewport_width = state->crtc_w;
7462 			int viewport_height = state->crtc_h;
7463 
7464 			if (state->crtc_x < 0)
7465 				viewport_width += state->crtc_x;
7466 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7467 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7468 
7469 			if (state->crtc_y < 0)
7470 				viewport_height += state->crtc_y;
7471 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7472 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7473 
7474 			if (viewport_width < 0 || viewport_height < 0) {
7475 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7476 				return -EINVAL;
7477 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7478 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7479 				return -EINVAL;
7480 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7481 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7482 				return -EINVAL;
7483 			}
7484 
7485 		}
7486 
7487 		/* Get min/max allowed scaling factors from plane caps. */
7488 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7489 					     &min_downscale, &max_upscale);
7490 		/*
7491 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7492 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7493 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7494 		 */
7495 		min_scale = (1000 << 16) / max_upscale;
7496 		max_scale = (1000 << 16) / min_downscale;
7497 	}
7498 
7499 	return drm_atomic_helper_check_plane_state(
7500 		state, new_crtc_state, min_scale, max_scale, true, true);
7501 }
7502 
7503 static int dm_plane_atomic_check(struct drm_plane *plane,
7504 				 struct drm_atomic_state *state)
7505 {
7506 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7507 										 plane);
7508 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7509 	struct dc *dc = adev->dm.dc;
7510 	struct dm_plane_state *dm_plane_state;
7511 	struct dc_scaling_info scaling_info;
7512 	struct drm_crtc_state *new_crtc_state;
7513 	int ret;
7514 
7515 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7516 
7517 	dm_plane_state = to_dm_plane_state(new_plane_state);
7518 
7519 	if (!dm_plane_state->dc_state)
7520 		return 0;
7521 
7522 	new_crtc_state =
7523 		drm_atomic_get_new_crtc_state(state,
7524 					      new_plane_state->crtc);
7525 	if (!new_crtc_state)
7526 		return -EINVAL;
7527 
7528 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7529 	if (ret)
7530 		return ret;
7531 
7532 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7533 	if (ret)
7534 		return ret;
7535 
7536 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7537 		return 0;
7538 
7539 	return -EINVAL;
7540 }
7541 
7542 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7543 				       struct drm_atomic_state *state)
7544 {
7545 	/* Only support async updates on cursor planes. */
7546 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7547 		return -EINVAL;
7548 
7549 	return 0;
7550 }
7551 
7552 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7553 					 struct drm_atomic_state *state)
7554 {
7555 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7556 									   plane);
7557 	struct drm_plane_state *old_state =
7558 		drm_atomic_get_old_plane_state(state, plane);
7559 
7560 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7561 
7562 	swap(plane->state->fb, new_state->fb);
7563 
7564 	plane->state->src_x = new_state->src_x;
7565 	plane->state->src_y = new_state->src_y;
7566 	plane->state->src_w = new_state->src_w;
7567 	plane->state->src_h = new_state->src_h;
7568 	plane->state->crtc_x = new_state->crtc_x;
7569 	plane->state->crtc_y = new_state->crtc_y;
7570 	plane->state->crtc_w = new_state->crtc_w;
7571 	plane->state->crtc_h = new_state->crtc_h;
7572 
7573 	handle_cursor_update(plane, old_state);
7574 }
7575 
7576 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7577 	.prepare_fb = dm_plane_helper_prepare_fb,
7578 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7579 	.atomic_check = dm_plane_atomic_check,
7580 	.atomic_async_check = dm_plane_atomic_async_check,
7581 	.atomic_async_update = dm_plane_atomic_async_update
7582 };
7583 
7584 /*
7585  * TODO: these are currently initialized to rgb formats only.
7586  * For future use cases we should either initialize them dynamically based on
7587  * plane capabilities, or initialize this array to all formats, so the internal
7588  * drm check will succeed, and let DC implement the proper check.
7589  */
7590 static const uint32_t rgb_formats[] = {
7591 	DRM_FORMAT_XRGB8888,
7592 	DRM_FORMAT_ARGB8888,
7593 	DRM_FORMAT_RGBA8888,
7594 	DRM_FORMAT_XRGB2101010,
7595 	DRM_FORMAT_XBGR2101010,
7596 	DRM_FORMAT_ARGB2101010,
7597 	DRM_FORMAT_ABGR2101010,
7598 	DRM_FORMAT_XRGB16161616,
7599 	DRM_FORMAT_XBGR16161616,
7600 	DRM_FORMAT_ARGB16161616,
7601 	DRM_FORMAT_ABGR16161616,
7602 	DRM_FORMAT_XBGR8888,
7603 	DRM_FORMAT_ABGR8888,
7604 	DRM_FORMAT_RGB565,
7605 };
7606 
7607 static const uint32_t overlay_formats[] = {
7608 	DRM_FORMAT_XRGB8888,
7609 	DRM_FORMAT_ARGB8888,
7610 	DRM_FORMAT_RGBA8888,
7611 	DRM_FORMAT_XBGR8888,
7612 	DRM_FORMAT_ABGR8888,
7613 	DRM_FORMAT_RGB565
7614 };
7615 
7616 static const u32 cursor_formats[] = {
7617 	DRM_FORMAT_ARGB8888
7618 };
7619 
7620 static int get_plane_formats(const struct drm_plane *plane,
7621 			     const struct dc_plane_cap *plane_cap,
7622 			     uint32_t *formats, int max_formats)
7623 {
7624 	int i, num_formats = 0;
7625 
7626 	/*
7627 	 * TODO: Query support for each group of formats directly from
7628 	 * DC plane caps. This will require adding more formats to the
7629 	 * caps list.
7630 	 */
7631 
7632 	switch (plane->type) {
7633 	case DRM_PLANE_TYPE_PRIMARY:
7634 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7635 			if (num_formats >= max_formats)
7636 				break;
7637 
7638 			formats[num_formats++] = rgb_formats[i];
7639 		}
7640 
7641 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7642 			formats[num_formats++] = DRM_FORMAT_NV12;
7643 		if (plane_cap && plane_cap->pixel_format_support.p010)
7644 			formats[num_formats++] = DRM_FORMAT_P010;
7645 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7646 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7647 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7648 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7649 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7650 		}
7651 		break;
7652 
7653 	case DRM_PLANE_TYPE_OVERLAY:
7654 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7655 			if (num_formats >= max_formats)
7656 				break;
7657 
7658 			formats[num_formats++] = overlay_formats[i];
7659 		}
7660 		break;
7661 
7662 	case DRM_PLANE_TYPE_CURSOR:
7663 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7664 			if (num_formats >= max_formats)
7665 				break;
7666 
7667 			formats[num_formats++] = cursor_formats[i];
7668 		}
7669 		break;
7670 	}
7671 
7672 	return num_formats;
7673 }
7674 
7675 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7676 				struct drm_plane *plane,
7677 				unsigned long possible_crtcs,
7678 				const struct dc_plane_cap *plane_cap)
7679 {
7680 	uint32_t formats[32];
7681 	int num_formats;
7682 	int res = -EPERM;
7683 	unsigned int supported_rotations;
7684 	uint64_t *modifiers = NULL;
7685 
7686 	num_formats = get_plane_formats(plane, plane_cap, formats,
7687 					ARRAY_SIZE(formats));
7688 
7689 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7690 	if (res)
7691 		return res;
7692 
7693 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7694 				       &dm_plane_funcs, formats, num_formats,
7695 				       modifiers, plane->type, NULL);
7696 	kfree(modifiers);
7697 	if (res)
7698 		return res;
7699 
7700 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7701 	    plane_cap && plane_cap->per_pixel_alpha) {
7702 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7703 					  BIT(DRM_MODE_BLEND_PREMULTI);
7704 
7705 		drm_plane_create_alpha_property(plane);
7706 		drm_plane_create_blend_mode_property(plane, blend_caps);
7707 	}
7708 
7709 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7710 	    plane_cap &&
7711 	    (plane_cap->pixel_format_support.nv12 ||
7712 	     plane_cap->pixel_format_support.p010)) {
7713 		/* This only affects YUV formats. */
7714 		drm_plane_create_color_properties(
7715 			plane,
7716 			BIT(DRM_COLOR_YCBCR_BT601) |
7717 			BIT(DRM_COLOR_YCBCR_BT709) |
7718 			BIT(DRM_COLOR_YCBCR_BT2020),
7719 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7720 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7721 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7722 	}
7723 
7724 	supported_rotations =
7725 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7726 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7727 
7728 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7729 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7730 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7731 						   supported_rotations);
7732 
7733 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7734 
7735 	/* Create (reset) the plane state */
7736 	if (plane->funcs->reset)
7737 		plane->funcs->reset(plane);
7738 
7739 	return 0;
7740 }
7741 
7742 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7743 			       struct drm_plane *plane,
7744 			       uint32_t crtc_index)
7745 {
7746 	struct amdgpu_crtc *acrtc = NULL;
7747 	struct drm_plane *cursor_plane;
7748 
7749 	int res = -ENOMEM;
7750 
7751 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7752 	if (!cursor_plane)
7753 		goto fail;
7754 
7755 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7756 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
7757 
7758 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7759 	if (!acrtc)
7760 		goto fail;
7761 
7762 	res = drm_crtc_init_with_planes(
7763 			dm->ddev,
7764 			&acrtc->base,
7765 			plane,
7766 			cursor_plane,
7767 			&amdgpu_dm_crtc_funcs, NULL);
7768 
7769 	if (res)
7770 		goto fail;
7771 
7772 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7773 
7774 	/* Create (reset) the crtc state */
7775 	if (acrtc->base.funcs->reset)
7776 		acrtc->base.funcs->reset(&acrtc->base);
7777 
7778 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7779 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7780 
7781 	acrtc->crtc_id = crtc_index;
7782 	acrtc->base.enabled = false;
7783 	acrtc->otg_inst = -1;
7784 
7785 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7786 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7787 				   true, MAX_COLOR_LUT_ENTRIES);
7788 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7789 
7790 	return 0;
7791 
7792 fail:
7793 	kfree(acrtc);
7794 	kfree(cursor_plane);
7795 	return res;
7796 }
7797 
7798 
7799 static int to_drm_connector_type(enum signal_type st)
7800 {
7801 	switch (st) {
7802 	case SIGNAL_TYPE_HDMI_TYPE_A:
7803 		return DRM_MODE_CONNECTOR_HDMIA;
7804 	case SIGNAL_TYPE_EDP:
7805 		return DRM_MODE_CONNECTOR_eDP;
7806 	case SIGNAL_TYPE_LVDS:
7807 		return DRM_MODE_CONNECTOR_LVDS;
7808 	case SIGNAL_TYPE_RGB:
7809 		return DRM_MODE_CONNECTOR_VGA;
7810 	case SIGNAL_TYPE_DISPLAY_PORT:
7811 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7812 		return DRM_MODE_CONNECTOR_DisplayPort;
7813 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7814 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7815 		return DRM_MODE_CONNECTOR_DVID;
7816 	case SIGNAL_TYPE_VIRTUAL:
7817 		return DRM_MODE_CONNECTOR_VIRTUAL;
7818 
7819 	default:
7820 		return DRM_MODE_CONNECTOR_Unknown;
7821 	}
7822 }
7823 
7824 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7825 {
7826 	struct drm_encoder *encoder;
7827 
7828 	/* There is only one encoder per connector */
7829 	drm_connector_for_each_possible_encoder(connector, encoder)
7830 		return encoder;
7831 
7832 	return NULL;
7833 }
7834 
7835 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7836 {
7837 	struct drm_encoder *encoder;
7838 	struct amdgpu_encoder *amdgpu_encoder;
7839 
7840 	encoder = amdgpu_dm_connector_to_encoder(connector);
7841 
7842 	if (encoder == NULL)
7843 		return;
7844 
7845 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7846 
7847 	amdgpu_encoder->native_mode.clock = 0;
7848 
7849 	if (!list_empty(&connector->probed_modes)) {
7850 		struct drm_display_mode *preferred_mode = NULL;
7851 
7852 		list_for_each_entry(preferred_mode,
7853 				    &connector->probed_modes,
7854 				    head) {
7855 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7856 				amdgpu_encoder->native_mode = *preferred_mode;
7857 
7858 			break;
7859 		}
7860 
7861 	}
7862 }
7863 
7864 static struct drm_display_mode *
7865 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7866 			     char *name,
7867 			     int hdisplay, int vdisplay)
7868 {
7869 	struct drm_device *dev = encoder->dev;
7870 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7871 	struct drm_display_mode *mode = NULL;
7872 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7873 
7874 	mode = drm_mode_duplicate(dev, native_mode);
7875 
7876 	if (mode == NULL)
7877 		return NULL;
7878 
7879 	mode->hdisplay = hdisplay;
7880 	mode->vdisplay = vdisplay;
7881 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7882 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7883 
7884 	return mode;
7885 
7886 }
7887 
7888 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7889 						 struct drm_connector *connector)
7890 {
7891 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7892 	struct drm_display_mode *mode = NULL;
7893 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7894 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7895 				to_amdgpu_dm_connector(connector);
7896 	int i;
7897 	int n;
7898 	struct mode_size {
7899 		char name[DRM_DISPLAY_MODE_LEN];
7900 		int w;
7901 		int h;
7902 	} common_modes[] = {
7903 		{  "640x480",  640,  480},
7904 		{  "800x600",  800,  600},
7905 		{ "1024x768", 1024,  768},
7906 		{ "1280x720", 1280,  720},
7907 		{ "1280x800", 1280,  800},
7908 		{"1280x1024", 1280, 1024},
7909 		{ "1440x900", 1440,  900},
7910 		{"1680x1050", 1680, 1050},
7911 		{"1600x1200", 1600, 1200},
7912 		{"1920x1080", 1920, 1080},
7913 		{"1920x1200", 1920, 1200}
7914 	};
7915 
7916 	n = ARRAY_SIZE(common_modes);
7917 
7918 	for (i = 0; i < n; i++) {
7919 		struct drm_display_mode *curmode = NULL;
7920 		bool mode_existed = false;
7921 
7922 		if (common_modes[i].w > native_mode->hdisplay ||
7923 		    common_modes[i].h > native_mode->vdisplay ||
7924 		   (common_modes[i].w == native_mode->hdisplay &&
7925 		    common_modes[i].h == native_mode->vdisplay))
7926 			continue;
7927 
7928 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7929 			if (common_modes[i].w == curmode->hdisplay &&
7930 			    common_modes[i].h == curmode->vdisplay) {
7931 				mode_existed = true;
7932 				break;
7933 			}
7934 		}
7935 
7936 		if (mode_existed)
7937 			continue;
7938 
7939 		mode = amdgpu_dm_create_common_mode(encoder,
7940 				common_modes[i].name, common_modes[i].w,
7941 				common_modes[i].h);
		if (!mode)
			continue;

7942 		drm_mode_probed_add(connector, mode);
7943 		amdgpu_dm_connector->num_modes++;
7944 	}
7945 }
7946 
7947 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7948 {
7949 	struct drm_encoder *encoder;
7950 	struct amdgpu_encoder *amdgpu_encoder;
7951 	const struct drm_display_mode *native_mode;
7952 
7953 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7954 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7955 		return;
7956 
7957 	encoder = amdgpu_dm_connector_to_encoder(connector);
7958 	if (!encoder)
7959 		return;
7960 
7961 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7962 
7963 	native_mode = &amdgpu_encoder->native_mode;
7964 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7965 		return;
7966 
7967 	drm_connector_set_panel_orientation_with_quirk(connector,
7968 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7969 						       native_mode->hdisplay,
7970 						       native_mode->vdisplay);
7971 }
7972 
7973 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7974 					      struct edid *edid)
7975 {
7976 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7977 			to_amdgpu_dm_connector(connector);
7978 
7979 	if (edid) {
7980 		/* empty probed_modes */
7981 		INIT_LIST_HEAD(&connector->probed_modes);
7982 		amdgpu_dm_connector->num_modes =
7983 				drm_add_edid_modes(connector, edid);
7984 
7985 		/* Sort the probed modes before calling
7986 		 * amdgpu_dm_get_native_mode(), since an EDID can have
7987 		 * more than one preferred mode. Modes later in the
7988 		 * probed mode list could have a higher, preferred
7989 		 * resolution. For example, a 3840x2160 preferred timing
7990 		 * in the base EDID and a 4096x2160 preferred resolution
7991 		 * in a DID extension block later.
7992 		 */
7993 		drm_mode_sort(&connector->probed_modes);
7994 		amdgpu_dm_get_native_mode(connector);
7995 
7996 		/* Freesync capabilities are reset by calling
7997 		 * drm_add_edid_modes() and need to be
7998 		 * restored here.
7999 		 */
8000 		amdgpu_dm_update_freesync_caps(connector, edid);
8001 
8002 		amdgpu_set_panel_orientation(connector);
8003 	} else {
8004 		amdgpu_dm_connector->num_modes = 0;
8005 	}
8006 }
8007 
8008 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8009 			      struct drm_display_mode *mode)
8010 {
8011 	struct drm_display_mode *m;
8012 
8013 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8014 		if (drm_mode_equal(m, mode))
8015 			return true;
8016 	}
8017 
8018 	return false;
8019 }
8020 
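/*
 * Synthesize additional fixed-refresh modes at common video rates within the
 * panel's FreeSync range by stretching the vertical total of the highest
 * refresh rate probed mode.
 */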
8021 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8022 {
8023 	const struct drm_display_mode *m;
8024 	struct drm_display_mode *new_mode;
8025 	uint i;
8026 	uint32_t new_modes_count = 0;
8027 
8028 	/* Standard FPS values
8029 	 *
8030 	 * 23.976       - TV/NTSC
8031 	 * 24 	        - Cinema
8032 	 * 25 	        - TV/PAL
8033 	 * 29.97        - TV/NTSC
8034 	 * 30 	        - TV/NTSC
8035 	 * 48 	        - Cinema HFR
8036 	 * 50 	        - TV/PAL
8037 	 * 60 	        - Commonly used
8038 	 * 48,72,96,120 - Multiples of 24
8039 	 */
8040 	static const uint32_t common_rates[] = {
8041 		23976, 24000, 25000, 29970, 30000,
8042 		48000, 50000, 60000, 72000, 96000, 120000
8043 	};
8044 
8045 	/*
8046 	 * Find mode with highest refresh rate with the same resolution
8047 	 * as the preferred mode. Some monitors report a preferred mode
8048 	 * with lower resolution than the highest refresh rate supported.
8049 	 */
8050 
8051 	m = get_highest_refresh_rate_mode(aconnector, true);
8052 	if (!m)
8053 		return 0;
8054 
8055 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8056 		uint64_t target_vtotal, target_vtotal_diff;
8057 		uint64_t num, den;
8058 
8059 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8060 			continue;
8061 
8062 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8063 		    common_rates[i] > aconnector->max_vfreq * 1000)
8064 			continue;
8065 
8066 		num = (unsigned long long)m->clock * 1000 * 1000;
8067 		den = common_rates[i] * (unsigned long long)m->htotal;
8068 		target_vtotal = div_u64(num, den);
8069 		target_vtotal_diff = target_vtotal - m->vtotal;
8070 
8071 		/* Check for illegal modes */
8072 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8073 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8074 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8075 			continue;
8076 
8077 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8078 		if (!new_mode)
8079 			goto out;
8080 
8081 		new_mode->vtotal += (u16)target_vtotal_diff;
8082 		new_mode->vsync_start += (u16)target_vtotal_diff;
8083 		new_mode->vsync_end += (u16)target_vtotal_diff;
8084 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8085 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8086 
8087 		if (!is_duplicate_mode(aconnector, new_mode)) {
8088 			drm_mode_probed_add(&aconnector->base, new_mode);
8089 			new_modes_count += 1;
8090 		} else
8091 			drm_mode_destroy(aconnector->base.dev, new_mode);
8092 	}
8093  out:
8094 	return new_modes_count;
8095 }
8096 
8097 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8098 						   struct edid *edid)
8099 {
8100 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8101 		to_amdgpu_dm_connector(connector);
8102 
8103 	if (!(amdgpu_freesync_vid_mode && edid))
8104 		return;
8105 
8106 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8107 		amdgpu_dm_connector->num_modes +=
8108 			add_fs_modes(amdgpu_dm_connector);
8109 }
8110 
8111 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8112 {
8113 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8114 			to_amdgpu_dm_connector(connector);
8115 	struct drm_encoder *encoder;
8116 	struct edid *edid = amdgpu_dm_connector->edid;
8117 
8118 	encoder = amdgpu_dm_connector_to_encoder(connector);
8119 
8120 	if (!drm_edid_is_valid(edid)) {
8121 		amdgpu_dm_connector->num_modes =
8122 				drm_add_modes_noedid(connector, 640, 480);
8123 	} else {
8124 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8125 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8126 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8127 	}
8128 	amdgpu_dm_fbc_init(connector);
8129 
8130 	return amdgpu_dm_connector->num_modes;
8131 }
8132 
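/*
 * Common connector initialization: seed the default connector state and
 * attach the scaling, underscan, max bpc, ABM, HDR metadata, VRR and content
 * protection properties appropriate for the connector type.
 */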
8133 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8134 				     struct amdgpu_dm_connector *aconnector,
8135 				     int connector_type,
8136 				     struct dc_link *link,
8137 				     int link_index)
8138 {
8139 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8140 
8141 	/*
8142 	 * Some of the properties below require access to state, like bpc.
8143 	 * Allocate some default initial connector state with our reset helper.
8144 	 */
8145 	if (aconnector->base.funcs->reset)
8146 		aconnector->base.funcs->reset(&aconnector->base);
8147 
8148 	aconnector->connector_id = link_index;
8149 	aconnector->dc_link = link;
8150 	aconnector->base.interlace_allowed = false;
8151 	aconnector->base.doublescan_allowed = false;
8152 	aconnector->base.stereo_allowed = false;
8153 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8154 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8155 	aconnector->audio_inst = -1;
8156 	mutex_init(&aconnector->hpd_lock);
8157 
8158 	/*
8159 	 * Configure HPD hot plug support: the connector->polled default value
8160 	 * is 0, which means HPD hot plug is not supported.
8161 	 */
8162 	switch (connector_type) {
8163 	case DRM_MODE_CONNECTOR_HDMIA:
8164 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8165 		aconnector->base.ycbcr_420_allowed =
8166 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8167 		break;
8168 	case DRM_MODE_CONNECTOR_DisplayPort:
8169 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8170 		if (link->is_dig_mapping_flexible &&
8171 		    link->dc->res_pool->funcs->link_encs_assign) {
8172 			link->link_enc =
8173 				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8174 			if (!link->link_enc)
8175 				link->link_enc =
8176 					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8177 		}
8178 
8179 		if (link->link_enc)
8180 			aconnector->base.ycbcr_420_allowed =
8181 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8182 		break;
8183 	case DRM_MODE_CONNECTOR_DVID:
8184 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8185 		break;
8186 	default:
8187 		break;
8188 	}
8189 
8190 	drm_object_attach_property(&aconnector->base.base,
8191 				dm->ddev->mode_config.scaling_mode_property,
8192 				DRM_MODE_SCALE_NONE);
8193 
8194 	drm_object_attach_property(&aconnector->base.base,
8195 				adev->mode_info.underscan_property,
8196 				UNDERSCAN_OFF);
8197 	drm_object_attach_property(&aconnector->base.base,
8198 				adev->mode_info.underscan_hborder_property,
8199 				0);
8200 	drm_object_attach_property(&aconnector->base.base,
8201 				adev->mode_info.underscan_vborder_property,
8202 				0);
8203 
8204 	if (!aconnector->mst_port)
8205 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8206 
8207 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8208 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8209 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8210 
8211 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8212 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8213 		drm_object_attach_property(&aconnector->base.base,
8214 				adev->mode_info.abm_level_property, 0);
8215 	}
8216 
8217 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8218 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8219 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8220 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8221 
8222 		if (!aconnector->mst_port)
8223 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8224 
8225 #ifdef CONFIG_DRM_AMD_DC_HDCP
8226 		if (adev->dm.hdcp_workqueue)
8227 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8228 #endif
8229 	}
8230 }
8231 
8232 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8233 			      struct i2c_msg *msgs, int num)
8234 {
8235 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8236 	struct ddc_service *ddc_service = i2c->ddc_service;
8237 	struct i2c_command cmd;
8238 	int i;
8239 	int result = -EIO;
8240 
8241 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8242 
8243 	if (!cmd.payloads)
8244 		return result;
8245 
8246 	cmd.number_of_payloads = num;
8247 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8248 	cmd.speed = 100;
8249 
8250 	for (i = 0; i < num; i++) {
8251 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8252 		cmd.payloads[i].address = msgs[i].addr;
8253 		cmd.payloads[i].length = msgs[i].len;
8254 		cmd.payloads[i].data = msgs[i].buf;
8255 	}
8256 
8257 	if (dc_submit_i2c(
8258 			ddc_service->ctx->dc,
8259 			ddc_service->ddc_pin->hw_info.ddc_channel,
8260 			&cmd))
8261 		result = num;
8262 
8263 	kfree(cmd.payloads);
8264 	return result;
8265 }
8266 
8267 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8268 {
8269 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8270 }
8271 
8272 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8273 	.master_xfer = amdgpu_dm_i2c_xfer,
8274 	.functionality = amdgpu_dm_i2c_func,
8275 };
8276 
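/*
 * Create an i2c adapter whose transfers are routed through the DC DDC
 * service for the given link.
 */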
8277 static struct amdgpu_i2c_adapter *
8278 create_i2c(struct ddc_service *ddc_service,
8279 	   int link_index,
8280 	   int *res)
8281 {
8282 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8283 	struct amdgpu_i2c_adapter *i2c;
8284 
8285 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8286 	if (!i2c)
8287 		return NULL;
8288 	i2c->base.owner = THIS_MODULE;
8289 	i2c->base.class = I2C_CLASS_DDC;
8290 	i2c->base.dev.parent = &adev->pdev->dev;
8291 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8292 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8293 	i2c_set_adapdata(&i2c->base, i2c);
8294 	i2c->ddc_service = ddc_service;
8295 	if (i2c->ddc_service->ddc_pin)
8296 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8297 
8298 	return i2c;
8299 }
8300 
8301 
8302 /*
8303  * Note: this function assumes that dc_link_detect() was called for the
8304  * dc_link which will be represented by this aconnector.
8305  */
8306 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8307 				    struct amdgpu_dm_connector *aconnector,
8308 				    uint32_t link_index,
8309 				    struct amdgpu_encoder *aencoder)
8310 {
8311 	int res = 0;
8312 	int connector_type;
8313 	struct dc *dc = dm->dc;
8314 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8315 	struct amdgpu_i2c_adapter *i2c;
8316 
8317 	link->priv = aconnector;
8318 
8319 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8320 
8321 	i2c = create_i2c(link->ddc, link->link_index, &res);
8322 	if (!i2c) {
8323 		DRM_ERROR("Failed to create i2c adapter data\n");
8324 		return -ENOMEM;
8325 	}
8326 
8327 	aconnector->i2c = i2c;
8328 	res = i2c_add_adapter(&i2c->base);
8329 
8330 	if (res) {
8331 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8332 		goto out_free;
8333 	}
8334 
8335 	connector_type = to_drm_connector_type(link->connector_signal);
8336 
8337 	res = drm_connector_init_with_ddc(
8338 			dm->ddev,
8339 			&aconnector->base,
8340 			&amdgpu_dm_connector_funcs,
8341 			connector_type,
8342 			&i2c->base);
8343 
8344 	if (res) {
8345 		DRM_ERROR("connector_init failed\n");
8346 		aconnector->connector_id = -1;
8347 		goto out_free;
8348 	}
8349 
8350 	drm_connector_helper_add(
8351 			&aconnector->base,
8352 			&amdgpu_dm_connector_helper_funcs);
8353 
8354 	amdgpu_dm_connector_init_helper(
8355 		dm,
8356 		aconnector,
8357 		connector_type,
8358 		link,
8359 		link_index);
8360 
8361 	drm_connector_attach_encoder(
8362 		&aconnector->base, &aencoder->base);
8363 
8364 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8365 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8366 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8367 
8368 out_free:
8369 	if (res) {
8370 		kfree(i2c);
8371 		aconnector->i2c = NULL;
8372 	}
8373 	return res;
8374 }
8375 
8376 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8377 {
8378 	switch (adev->mode_info.num_crtc) {
8379 	case 1:
8380 		return 0x1;
8381 	case 2:
8382 		return 0x3;
8383 	case 3:
8384 		return 0x7;
8385 	case 4:
8386 		return 0xf;
8387 	case 5:
8388 		return 0x1f;
8389 	case 6:
8390 	default:
8391 		return 0x3f;
8392 	}
8393 }
8394 
8395 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8396 				  struct amdgpu_encoder *aencoder,
8397 				  uint32_t link_index)
8398 {
8399 	struct amdgpu_device *adev = drm_to_adev(dev);
8400 
8401 	int res = drm_encoder_init(dev,
8402 				   &aencoder->base,
8403 				   &amdgpu_dm_encoder_funcs,
8404 				   DRM_MODE_ENCODER_TMDS,
8405 				   NULL);
8406 
8407 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8408 
8409 	if (!res)
8410 		aencoder->encoder_id = link_index;
8411 	else
8412 		aencoder->encoder_id = -1;
8413 
8414 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8415 
8416 	return res;
8417 }
8418 
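/*
 * Enable or disable the CRTC's pageflip (and secure-display vline0)
 * interrupts together with the DRM vblank machinery.
 */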
8419 static void manage_dm_interrupts(struct amdgpu_device *adev,
8420 				 struct amdgpu_crtc *acrtc,
8421 				 bool enable)
8422 {
8423 	/*
8424 	 * We have no guarantee that the frontend index maps to the same
8425 	 * backend index - some even map to more than one.
8426 	 *
8427 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8428 	 */
8429 	int irq_type =
8430 		amdgpu_display_crtc_idx_to_irq_type(
8431 			adev,
8432 			acrtc->crtc_id);
8433 
8434 	if (enable) {
8435 		drm_crtc_vblank_on(&acrtc->base);
8436 		amdgpu_irq_get(
8437 			adev,
8438 			&adev->pageflip_irq,
8439 			irq_type);
8440 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8441 		amdgpu_irq_get(
8442 			adev,
8443 			&adev->vline0_irq,
8444 			irq_type);
8445 #endif
8446 	} else {
8447 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8448 		amdgpu_irq_put(
8449 			adev,
8450 			&adev->vline0_irq,
8451 			irq_type);
8452 #endif
8453 		amdgpu_irq_put(
8454 			adev,
8455 			&adev->pageflip_irq,
8456 			irq_type);
8457 		drm_crtc_vblank_off(&acrtc->base);
8458 	}
8459 }
8460 
8461 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8462 				      struct amdgpu_crtc *acrtc)
8463 {
8464 	int irq_type =
8465 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8466 
8467 	/**
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
8470 	 */
8471 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8472 }
8473 
8474 static bool
8475 is_scaling_state_different(const struct dm_connector_state *dm_state,
8476 			   const struct dm_connector_state *old_dm_state)
8477 {
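	/*
	 * A change is reported when the scaling mode differs, when underscan
	 * is toggled while non-zero borders are in use, or when the border
	 * sizes themselves change.
	 */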
8478 	if (dm_state->scaling != old_dm_state->scaling)
8479 		return true;
8480 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8481 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8482 			return true;
8483 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8484 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8485 			return true;
8486 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8487 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8488 		return true;
8489 	return false;
8490 }
8491 
8492 #ifdef CONFIG_DRM_AMD_DC_HDCP
8493 static bool is_content_protection_different(struct drm_connector_state *state,
8494 					    const struct drm_connector_state *old_state,
8495 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8496 {
8497 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8498 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8499 
8500 	/* Handle: Type0/1 change */
8501 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8502 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8503 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8504 		return true;
8505 	}
8506 
	/* CP is being re-enabled, ignore this
8508 	 *
8509 	 * Handles:	ENABLED -> DESIRED
8510 	 */
8511 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8512 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8513 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8514 		return false;
8515 	}
8516 
8517 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8518 	 *
8519 	 * Handles:	UNDESIRED -> ENABLED
8520 	 */
8521 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8522 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8523 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8524 
8525 	/* Stream removed and re-enabled
8526 	 *
8527 	 * Can sometimes overlap with the HPD case,
8528 	 * thus set update_hdcp to false to avoid
8529 	 * setting HDCP multiple times.
8530 	 *
8531 	 * Handles:	DESIRED -> DESIRED (Special case)
8532 	 */
8533 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8534 		state->crtc && state->crtc->enabled &&
8535 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8536 		dm_con_state->update_hdcp = false;
8537 		return true;
8538 	}
8539 
8540 	/* Hot-plug, headless s3, dpms
8541 	 *
8542 	 * Only start HDCP if the display is connected/enabled.
8543 	 * update_hdcp flag will be set to false until the next
8544 	 * HPD comes in.
8545 	 *
8546 	 * Handles:	DESIRED -> DESIRED (Special case)
8547 	 */
8548 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8549 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8550 		dm_con_state->update_hdcp = false;
8551 		return true;
8552 	}
8553 
8554 	/*
8555 	 * Handles:	UNDESIRED -> UNDESIRED
8556 	 *		DESIRED -> DESIRED
8557 	 *		ENABLED -> ENABLED
8558 	 */
8559 	if (old_state->content_protection == state->content_protection)
8560 		return false;
8561 
8562 	/*
8563 	 * Handles:	UNDESIRED -> DESIRED
8564 	 *		DESIRED -> UNDESIRED
8565 	 *		ENABLED -> UNDESIRED
8566 	 */
8567 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8568 		return true;
8569 
8570 	/*
8571 	 * Handles:	DESIRED -> ENABLED
8572 	 */
8573 	return false;
8574 }
8575 
8576 #endif
8577 static void remove_stream(struct amdgpu_device *adev,
8578 			  struct amdgpu_crtc *acrtc,
8579 			  struct dc_stream_state *stream)
8580 {
	/* This is the mode update path: mark the CRTC as no longer driving a stream. */
8582 
8583 	acrtc->otg_inst = -1;
8584 	acrtc->enabled = false;
8585 }
8586 
8587 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8588 			       struct dc_cursor_position *position)
8589 {
8590 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8591 	int x, y;
8592 	int xorigin = 0, yorigin = 0;
8593 
8594 	if (!crtc || !plane->state->fb)
8595 		return 0;
8596 
8597 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8598 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8599 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8600 			  __func__,
8601 			  plane->state->crtc_w,
8602 			  plane->state->crtc_h);
8603 		return -EINVAL;
8604 	}
8605 
8606 	x = plane->state->crtc_x;
8607 	y = plane->state->crtc_y;
8608 
8609 	if (x <= -amdgpu_crtc->max_cursor_width ||
8610 	    y <= -amdgpu_crtc->max_cursor_height)
8611 		return 0;
8612 
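	/*
	 * If the cursor is partially off the top/left edge of the screen,
	 * clamp the position to 0 and shift the hotspot so the visible part
	 * of the cursor stays where userspace expects it.
	 */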
8613 	if (x < 0) {
8614 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8615 		x = 0;
8616 	}
8617 	if (y < 0) {
8618 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8619 		y = 0;
8620 	}
8621 	position->enable = true;
8622 	position->translate_by_source = true;
8623 	position->x = x;
8624 	position->y = y;
8625 	position->x_hotspot = xorigin;
8626 	position->y_hotspot = yorigin;
8627 
8628 	return 0;
8629 }
8630 
8631 static void handle_cursor_update(struct drm_plane *plane,
8632 				 struct drm_plane_state *old_plane_state)
8633 {
8634 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8635 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8636 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8637 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8638 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8639 	uint64_t address = afb ? afb->address : 0;
8640 	struct dc_cursor_position position = {0};
8641 	struct dc_cursor_attributes attributes;
8642 	int ret;
8643 
8644 	if (!plane->state->fb && !old_plane_state->fb)
8645 		return;
8646 
8647 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8648 		      __func__,
8649 		      amdgpu_crtc->crtc_id,
8650 		      plane->state->crtc_w,
8651 		      plane->state->crtc_h);
8652 
8653 	ret = get_cursor_position(plane, crtc, &position);
8654 	if (ret)
8655 		return;
8656 
8657 	if (!position.enable) {
8658 		/* turn off cursor */
8659 		if (crtc_state && crtc_state->stream) {
8660 			mutex_lock(&adev->dm.dc_lock);
8661 			dc_stream_set_cursor_position(crtc_state->stream,
8662 						      &position);
8663 			mutex_unlock(&adev->dm.dc_lock);
8664 		}
8665 		return;
8666 	}
8667 
8668 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8669 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8670 
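	/* Program the cursor surface attributes (address, size, format). */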
8671 	memset(&attributes, 0, sizeof(attributes));
8672 	attributes.address.high_part = upper_32_bits(address);
8673 	attributes.address.low_part  = lower_32_bits(address);
8674 	attributes.width             = plane->state->crtc_w;
8675 	attributes.height            = plane->state->crtc_h;
8676 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8677 	attributes.rotation_angle    = 0;
8678 	attributes.attribute_flags.value = 0;
8679 
8680 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8681 
8682 	if (crtc_state->stream) {
8683 		mutex_lock(&adev->dm.dc_lock);
8684 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8685 							 &attributes))
8686 			DRM_ERROR("DC failed to set cursor attributes\n");
8687 
8688 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8689 						   &position))
8690 			DRM_ERROR("DC failed to set cursor position\n");
8691 		mutex_unlock(&adev->dm.dc_lock);
8692 	}
8693 }
8694 
8695 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8696 {
8697 
8698 	assert_spin_locked(&acrtc->base.dev->event_lock);
8699 	WARN_ON(acrtc->event);
8700 
8701 	acrtc->event = acrtc->base.state->event;
8702 
8703 	/* Set the flip status */
8704 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8705 
8706 	/* Mark this event as consumed */
8707 	acrtc->base.state->event = NULL;
8708 
8709 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8710 		     acrtc->crtc_id);
8711 }
8712 
8713 static void update_freesync_state_on_stream(
8714 	struct amdgpu_display_manager *dm,
8715 	struct dm_crtc_state *new_crtc_state,
8716 	struct dc_stream_state *new_stream,
8717 	struct dc_plane_state *surface,
8718 	u32 flip_timestamp_in_us)
8719 {
8720 	struct mod_vrr_params vrr_params;
8721 	struct dc_info_packet vrr_infopacket = {0};
8722 	struct amdgpu_device *adev = dm->adev;
8723 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8724 	unsigned long flags;
8725 	bool pack_sdp_v1_3 = false;
8726 
8727 	if (!new_stream)
8728 		return;
8729 
8730 	/*
8731 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8732 	 * For now it's sufficient to just guard against these conditions.
8733 	 */
8734 
8735 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8736 		return;
8737 
8738 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8740 
8741 	if (surface) {
8742 		mod_freesync_handle_preflip(
8743 			dm->freesync_module,
8744 			surface,
8745 			new_stream,
8746 			flip_timestamp_in_us,
8747 			&vrr_params);
8748 
8749 		if (adev->family < AMDGPU_FAMILY_AI &&
8750 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8751 			mod_freesync_handle_v_update(dm->freesync_module,
8752 						     new_stream, &vrr_params);
8753 
8754 			/* Need to call this before the frame ends. */
8755 			dc_stream_adjust_vmin_vmax(dm->dc,
8756 						   new_crtc_state->stream,
8757 						   &vrr_params.adjust);
8758 		}
8759 	}
8760 
8761 	mod_freesync_build_vrr_infopacket(
8762 		dm->freesync_module,
8763 		new_stream,
8764 		&vrr_params,
8765 		PACKET_TYPE_VRR,
8766 		TRANSFER_FUNC_UNKNOWN,
8767 		&vrr_infopacket,
8768 		pack_sdp_v1_3);
8769 
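	/*
	 * Track whether the VRR timing adjustment or the VRR infopacket
	 * actually changed so the commit path only reprograms what is needed.
	 */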
8770 	new_crtc_state->freesync_timing_changed |=
8771 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8772 			&vrr_params.adjust,
8773 			sizeof(vrr_params.adjust)) != 0);
8774 
8775 	new_crtc_state->freesync_vrr_info_changed |=
8776 		(memcmp(&new_crtc_state->vrr_infopacket,
8777 			&vrr_infopacket,
8778 			sizeof(vrr_infopacket)) != 0);
8779 
8780 	acrtc->dm_irq_params.vrr_params = vrr_params;
8781 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8782 
8783 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8784 	new_stream->vrr_infopacket = vrr_infopacket;
8785 
8786 	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8788 			      new_crtc_state->base.crtc->base.id,
8789 			      (int)new_crtc_state->base.vrr_enabled,
8790 			      (int)vrr_params.state);
8791 
8792 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8793 }
8794 
8795 static void update_stream_irq_parameters(
8796 	struct amdgpu_display_manager *dm,
8797 	struct dm_crtc_state *new_crtc_state)
8798 {
8799 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8800 	struct mod_vrr_params vrr_params;
8801 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8802 	struct amdgpu_device *adev = dm->adev;
8803 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8804 	unsigned long flags;
8805 
8806 	if (!new_stream)
8807 		return;
8808 
8809 	/*
8810 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8811 	 * For now it's sufficient to just guard against these conditions.
8812 	 */
8813 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8814 		return;
8815 
8816 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8817 	vrr_params = acrtc->dm_irq_params.vrr_params;
8818 
8819 	if (new_crtc_state->vrr_supported &&
8820 	    config.min_refresh_in_uhz &&
8821 	    config.max_refresh_in_uhz) {
8822 		/*
		 * If a freesync compatible mode was set, config.state will have
		 * been set in atomic check.
8825 		 */
8826 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8827 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8828 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8829 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8830 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8831 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8832 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8833 		} else {
8834 			config.state = new_crtc_state->base.vrr_enabled ?
8835 						     VRR_STATE_ACTIVE_VARIABLE :
8836 						     VRR_STATE_INACTIVE;
8837 		}
8838 	} else {
8839 		config.state = VRR_STATE_UNSUPPORTED;
8840 	}
8841 
8842 	mod_freesync_build_vrr_params(dm->freesync_module,
8843 				      new_stream,
8844 				      &config, &vrr_params);
8845 
8846 	new_crtc_state->freesync_timing_changed |=
8847 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8848 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8849 
8850 	new_crtc_state->freesync_config = config;
8851 	/* Copy state for access from DM IRQ handler */
8852 	acrtc->dm_irq_params.freesync_config = config;
8853 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8854 	acrtc->dm_irq_params.vrr_params = vrr_params;
8855 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8856 }
8857 
8858 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8859 					    struct dm_crtc_state *new_state)
8860 {
8861 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8862 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8863 
8864 	if (!old_vrr_active && new_vrr_active) {
8865 		/* Transition VRR inactive -> active:
8866 		 * While VRR is active, we must not disable vblank irq, as a
8867 		 * reenable after disable would compute bogus vblank/pflip
8868 		 * timestamps if it likely happened inside display front-porch.
8869 		 *
8870 		 * We also need vupdate irq for the actual core vblank handling
8871 		 * at end of vblank.
8872 		 */
8873 		dm_set_vupdate_irq(new_state->base.crtc, true);
8874 		drm_crtc_vblank_get(new_state->base.crtc);
8875 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8876 				 __func__, new_state->base.crtc->base.id);
8877 	} else if (old_vrr_active && !new_vrr_active) {
8878 		/* Transition VRR active -> inactive:
8879 		 * Allow vblank irq disable again for fixed refresh rate.
8880 		 */
8881 		dm_set_vupdate_irq(new_state->base.crtc, false);
8882 		drm_crtc_vblank_put(new_state->base.crtc);
8883 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8884 				 __func__, new_state->base.crtc->base.id);
8885 	}
8886 }
8887 
8888 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8889 {
8890 	struct drm_plane *plane;
8891 	struct drm_plane_state *old_plane_state;
8892 	int i;
8893 
8894 	/*
8895 	 * TODO: Make this per-stream so we don't issue redundant updates for
8896 	 * commits with multiple streams.
8897 	 */
8898 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8899 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8900 			handle_cursor_update(plane, old_plane_state);
8901 }
8902 
8903 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8904 				    struct dc_state *dc_state,
8905 				    struct drm_device *dev,
8906 				    struct amdgpu_display_manager *dm,
8907 				    struct drm_crtc *pcrtc,
8908 				    bool wait_for_vblank)
8909 {
8910 	uint32_t i;
8911 	uint64_t timestamp_ns;
8912 	struct drm_plane *plane;
8913 	struct drm_plane_state *old_plane_state, *new_plane_state;
8914 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8915 	struct drm_crtc_state *new_pcrtc_state =
8916 			drm_atomic_get_new_crtc_state(state, pcrtc);
8917 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8918 	struct dm_crtc_state *dm_old_crtc_state =
8919 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8920 	int planes_count = 0, vpos, hpos;
8921 	long r;
8922 	unsigned long flags;
8923 	struct amdgpu_bo *abo;
8924 	uint32_t target_vblank, last_flip_vblank;
8925 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8926 	bool pflip_present = false;
8927 	struct {
8928 		struct dc_surface_update surface_updates[MAX_SURFACES];
8929 		struct dc_plane_info plane_infos[MAX_SURFACES];
8930 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8931 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8932 		struct dc_stream_update stream_update;
8933 	} *bundle;
8934 
8935 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8936 
8937 	if (!bundle) {
8938 		dm_error("Failed to allocate update bundle\n");
8939 		goto cleanup;
8940 	}
8941 
8942 	/*
8943 	 * Disable the cursor first if we're disabling all the planes.
8944 	 * It'll remain on the screen after the planes are re-enabled
8945 	 * if we don't.
8946 	 */
8947 	if (acrtc_state->active_planes == 0)
8948 		amdgpu_dm_commit_cursors(state);
8949 
8950 	/* update planes when needed */
8951 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8952 		struct drm_crtc *crtc = new_plane_state->crtc;
8953 		struct drm_crtc_state *new_crtc_state;
8954 		struct drm_framebuffer *fb = new_plane_state->fb;
8955 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8956 		bool plane_needs_flip;
8957 		struct dc_plane_state *dc_plane;
8958 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8959 
8960 		/* Cursor plane is handled after stream updates */
8961 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8962 			continue;
8963 
8964 		if (!fb || !crtc || pcrtc != crtc)
8965 			continue;
8966 
8967 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8968 		if (!new_crtc_state->active)
8969 			continue;
8970 
8971 		dc_plane = dm_new_plane_state->dc_state;
8972 
8973 		bundle->surface_updates[planes_count].surface = dc_plane;
8974 		if (new_pcrtc_state->color_mgmt_changed) {
8975 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8976 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8977 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8978 		}
8979 
8980 		fill_dc_scaling_info(new_plane_state,
8981 				     &bundle->scaling_infos[planes_count]);
8982 
8983 		bundle->surface_updates[planes_count].scaling_info =
8984 			&bundle->scaling_infos[planes_count];
8985 
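		/*
		 * A page flip is only needed when both the old and the new
		 * plane state have a framebuffer; otherwise the plane is just
		 * being enabled or disabled.
		 */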
8986 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8987 
8988 		pflip_present = pflip_present || plane_needs_flip;
8989 
8990 		if (!plane_needs_flip) {
8991 			planes_count += 1;
8992 			continue;
8993 		}
8994 
8995 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8996 
8997 		/*
8998 		 * Wait for all fences on this FB. Do limited wait to avoid
8999 		 * deadlock during GPU reset when this fence will not signal
9000 		 * but we hold reservation lock for the BO.
9001 		 */
9002 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9003 					  msecs_to_jiffies(5000));
9004 		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
9006 
9007 		fill_dc_plane_info_and_addr(
9008 			dm->adev, new_plane_state,
9009 			afb->tiling_flags,
9010 			&bundle->plane_infos[planes_count],
9011 			&bundle->flip_addrs[planes_count].address,
9012 			afb->tmz_surface, false);
9013 
9014 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9015 				 new_plane_state->plane->index,
9016 				 bundle->plane_infos[planes_count].dcc.enable);
9017 
9018 		bundle->surface_updates[planes_count].plane_info =
9019 			&bundle->plane_infos[planes_count];
9020 
9021 		/*
9022 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9024 		 */
9025 		bundle->flip_addrs[planes_count].flip_immediate =
9026 			crtc->state->async_flip &&
9027 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9028 
9029 		timestamp_ns = ktime_get_ns();
9030 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9031 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9032 		bundle->surface_updates[planes_count].surface = dc_plane;
9033 
9034 		if (!bundle->surface_updates[planes_count].surface) {
9035 			DRM_ERROR("No surface for CRTC: id=%d\n",
9036 					acrtc_attach->crtc_id);
9037 			continue;
9038 		}
9039 
9040 		if (plane == pcrtc->primary)
9041 			update_freesync_state_on_stream(
9042 				dm,
9043 				acrtc_state,
9044 				acrtc_state->stream,
9045 				dc_plane,
9046 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9047 
9048 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9049 				 __func__,
9050 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9051 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9052 
9053 		planes_count += 1;
9054 
9055 	}
9056 
9057 	if (pflip_present) {
9058 		if (!vrr_active) {
9059 			/* Use old throttling in non-vrr fixed refresh rate mode
9060 			 * to keep flip scheduling based on target vblank counts
9061 			 * working in a backwards compatible way, e.g., for
9062 			 * clients using the GLX_OML_sync_control extension or
9063 			 * DRI3/Present extension with defined target_msc.
9064 			 */
9065 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9066 		}
9067 		else {
9068 			/* For variable refresh rate mode only:
9069 			 * Get vblank of last completed flip to avoid > 1 vrr
9070 			 * flips per video frame by use of throttling, but allow
9071 			 * flip programming anywhere in the possibly large
9072 			 * variable vrr vblank interval for fine-grained flip
9073 			 * timing control and more opportunity to avoid stutter
9074 			 * on late submission of flips.
9075 			 */
9076 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9077 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9078 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9079 		}
9080 
9081 		target_vblank = last_flip_vblank + wait_for_vblank;
9082 
9083 		/*
9084 		 * Wait until we're out of the vertical blank period before the one
9085 		 * targeted by the flip
9086 		 */
9087 		while ((acrtc_attach->enabled &&
9088 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9089 							    0, &vpos, &hpos, NULL,
9090 							    NULL, &pcrtc->hwmode)
9091 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9092 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9093 			(int)(target_vblank -
9094 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9095 			usleep_range(1000, 1100);
9096 		}
9097 
9098 		/**
9099 		 * Prepare the flip event for the pageflip interrupt to handle.
9100 		 *
9101 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9103 		 * from 0 -> n planes we have to skip a hardware generated event
9104 		 * and rely on sending it from software.
9105 		 */
9106 		if (acrtc_attach->base.state->event &&
9107 		    acrtc_state->active_planes > 0 &&
9108 		    !acrtc_state->force_dpms_off) {
9109 			drm_crtc_vblank_get(pcrtc);
9110 
9111 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9112 
9113 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9114 			prepare_flip_isr(acrtc_attach);
9115 
9116 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9117 		}
9118 
9119 		if (acrtc_state->stream) {
9120 			if (acrtc_state->freesync_vrr_info_changed)
9121 				bundle->stream_update.vrr_infopacket =
9122 					&acrtc_state->stream->vrr_infopacket;
9123 		}
9124 	}
9125 
9126 	/* Update the planes if changed or disable if we don't have any. */
9127 	if ((planes_count || acrtc_state->active_planes == 0) &&
9128 		acrtc_state->stream) {
9129 #if defined(CONFIG_DRM_AMD_DC_DCN)
9130 		/*
9131 		 * If PSR or idle optimizations are enabled then flush out
9132 		 * any pending work before hardware programming.
9133 		 */
9134 		if (dm->vblank_control_workqueue)
9135 			flush_workqueue(dm->vblank_control_workqueue);
9136 #endif
9137 
9138 		bundle->stream_update.stream = acrtc_state->stream;
9139 		if (new_pcrtc_state->mode_changed) {
9140 			bundle->stream_update.src = acrtc_state->stream->src;
9141 			bundle->stream_update.dst = acrtc_state->stream->dst;
9142 		}
9143 
9144 		if (new_pcrtc_state->color_mgmt_changed) {
9145 			/*
9146 			 * TODO: This isn't fully correct since we've actually
9147 			 * already modified the stream in place.
9148 			 */
9149 			bundle->stream_update.gamut_remap =
9150 				&acrtc_state->stream->gamut_remap_matrix;
9151 			bundle->stream_update.output_csc_transform =
9152 				&acrtc_state->stream->csc_color_matrix;
9153 			bundle->stream_update.out_transfer_func =
9154 				acrtc_state->stream->out_transfer_func;
9155 		}
9156 
9157 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9158 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9159 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9160 
9161 		/*
9162 		 * If FreeSync state on the stream has changed then we need to
9163 		 * re-adjust the min/max bounds now that DC doesn't handle this
9164 		 * as part of commit.
9165 		 */
9166 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9167 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9168 			dc_stream_adjust_vmin_vmax(
9169 				dm->dc, acrtc_state->stream,
9170 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9171 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9172 		}
9173 		mutex_lock(&dm->dc_lock);
9174 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9175 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9176 			amdgpu_dm_psr_disable(acrtc_state->stream);
9177 
9178 		dc_commit_updates_for_stream(dm->dc,
9179 						     bundle->surface_updates,
9180 						     planes_count,
9181 						     acrtc_state->stream,
9182 						     &bundle->stream_update,
9183 						     dc_state);
9184 
9185 		/**
9186 		 * Enable or disable the interrupts on the backend.
9187 		 *
9188 		 * Most pipes are put into power gating when unused.
9189 		 *
9190 		 * When power gating is enabled on a pipe we lose the
9191 		 * interrupt enablement state when power gating is disabled.
9192 		 *
9193 		 * So we need to update the IRQ control state in hardware
9194 		 * whenever the pipe turns on (since it could be previously
9195 		 * power gated) or off (since some pipes can't be power gated
9196 		 * on some ASICs).
9197 		 */
9198 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9199 			dm_update_pflip_irq_state(drm_to_adev(dev),
9200 						  acrtc_attach);
9201 
9202 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9203 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9204 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9205 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9206 
9207 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9208 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9209 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9210 			struct amdgpu_dm_connector *aconn =
9211 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9212 
9213 			if (aconn->psr_skip_count > 0)
9214 				aconn->psr_skip_count--;
9215 
9216 			/* Allow PSR when skip count is 0. */
9217 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9218 		} else {
9219 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9220 		}
9221 
9222 		mutex_unlock(&dm->dc_lock);
9223 	}
9224 
9225 	/*
9226 	 * Update cursor state *after* programming all the planes.
9227 	 * This avoids redundant programming in the case where we're going
9228 	 * to be disabling a single plane - those pipes are being disabled.
9229 	 */
9230 	if (acrtc_state->active_planes)
9231 		amdgpu_dm_commit_cursors(state);
9232 
9233 cleanup:
9234 	kfree(bundle);
9235 }
9236 
9237 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9238 				   struct drm_atomic_state *state)
9239 {
9240 	struct amdgpu_device *adev = drm_to_adev(dev);
9241 	struct amdgpu_dm_connector *aconnector;
9242 	struct drm_connector *connector;
9243 	struct drm_connector_state *old_con_state, *new_con_state;
9244 	struct drm_crtc_state *new_crtc_state;
9245 	struct dm_crtc_state *new_dm_crtc_state;
9246 	const struct dc_stream_status *status;
9247 	int i, inst;
9248 
9249 	/* Notify device removals. */
9250 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9251 		if (old_con_state->crtc != new_con_state->crtc) {
9252 			/* CRTC changes require notification. */
9253 			goto notify;
9254 		}
9255 
9256 		if (!new_con_state->crtc)
9257 			continue;
9258 
9259 		new_crtc_state = drm_atomic_get_new_crtc_state(
9260 			state, new_con_state->crtc);
9261 
9262 		if (!new_crtc_state)
9263 			continue;
9264 
9265 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9266 			continue;
9267 
9268 	notify:
9269 		aconnector = to_amdgpu_dm_connector(connector);
9270 
9271 		mutex_lock(&adev->dm.audio_lock);
9272 		inst = aconnector->audio_inst;
9273 		aconnector->audio_inst = -1;
9274 		mutex_unlock(&adev->dm.audio_lock);
9275 
9276 		amdgpu_dm_audio_eld_notify(adev, inst);
9277 	}
9278 
9279 	/* Notify audio device additions. */
9280 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9281 		if (!new_con_state->crtc)
9282 			continue;
9283 
9284 		new_crtc_state = drm_atomic_get_new_crtc_state(
9285 			state, new_con_state->crtc);
9286 
9287 		if (!new_crtc_state)
9288 			continue;
9289 
9290 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9291 			continue;
9292 
9293 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9294 		if (!new_dm_crtc_state->stream)
9295 			continue;
9296 
9297 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9298 		if (!status)
9299 			continue;
9300 
9301 		aconnector = to_amdgpu_dm_connector(connector);
9302 
9303 		mutex_lock(&adev->dm.audio_lock);
9304 		inst = status->audio_inst;
9305 		aconnector->audio_inst = inst;
9306 		mutex_unlock(&adev->dm.audio_lock);
9307 
9308 		amdgpu_dm_audio_eld_notify(adev, inst);
9309 	}
9310 }
9311 
9312 /*
9313  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9314  * @crtc_state: the DRM CRTC state
9315  * @stream_state: the DC stream state.
9316  *
9317  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9318  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9319  */
9320 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9321 						struct dc_stream_state *stream_state)
9322 {
9323 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9324 }
9325 
9326 /**
9327  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9328  * @state: The atomic state to commit
9329  *
9330  * This will tell DC to commit the constructed DC state from atomic_check,
9331  * programming the hardware. Any failures here implies a hardware failure, since
9332  * atomic check should have filtered anything non-kosher.
9333  */
9334 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9335 {
9336 	struct drm_device *dev = state->dev;
9337 	struct amdgpu_device *adev = drm_to_adev(dev);
9338 	struct amdgpu_display_manager *dm = &adev->dm;
9339 	struct dm_atomic_state *dm_state;
9340 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9341 	uint32_t i, j;
9342 	struct drm_crtc *crtc;
9343 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9344 	unsigned long flags;
9345 	bool wait_for_vblank = true;
9346 	struct drm_connector *connector;
9347 	struct drm_connector_state *old_con_state, *new_con_state;
9348 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9349 	int crtc_disable_count = 0;
9350 	bool mode_set_reset_required = false;
9351 
9352 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9353 
9354 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9355 
9356 	dm_state = dm_atomic_get_new_state(state);
9357 	if (dm_state && dm_state->context) {
9358 		dc_state = dm_state->context;
9359 	} else {
9360 		/* No state changes, retain current state. */
9361 		dc_state_temp = dc_create_state(dm->dc);
9362 		ASSERT(dc_state_temp);
9363 		dc_state = dc_state_temp;
9364 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9365 	}
9366 
9367 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
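	/*
	 * Disable interrupts and drop the stream reference for every CRTC
	 * that is being turned off or is going through a full modeset, before
	 * the new DC state is committed below.
	 */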
9368 				       new_crtc_state, i) {
9369 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9370 
9371 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9372 
9373 		if (old_crtc_state->active &&
9374 		    (!new_crtc_state->active ||
9375 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9376 			manage_dm_interrupts(adev, acrtc, false);
9377 			dc_stream_release(dm_old_crtc_state->stream);
9378 		}
9379 	}
9380 
9381 	drm_atomic_helper_calc_timestamping_constants(state);
9382 
9383 	/* update changed items */
9384 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9385 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9386 
9387 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9388 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9389 
9390 		DRM_DEBUG_ATOMIC(
9391 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9393 			"connectors_changed:%d\n",
9394 			acrtc->crtc_id,
9395 			new_crtc_state->enable,
9396 			new_crtc_state->active,
9397 			new_crtc_state->planes_changed,
9398 			new_crtc_state->mode_changed,
9399 			new_crtc_state->active_changed,
9400 			new_crtc_state->connectors_changed);
9401 
9402 		/* Disable cursor if disabling crtc */
9403 		if (old_crtc_state->active && !new_crtc_state->active) {
9404 			struct dc_cursor_position position;
9405 
9406 			memset(&position, 0, sizeof(position));
9407 			mutex_lock(&dm->dc_lock);
9408 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9409 			mutex_unlock(&dm->dc_lock);
9410 		}
9411 
9412 		/* Copy all transient state flags into dc state */
9413 		if (dm_new_crtc_state->stream) {
9414 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9415 							    dm_new_crtc_state->stream);
9416 		}
9417 
9418 		/* handles headless hotplug case, updating new_state and
9419 		 * aconnector as needed
9420 		 */
9421 
9422 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9423 
9424 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9425 
9426 			if (!dm_new_crtc_state->stream) {
9427 				/*
				 * This can happen because of issues with
				 * userspace notification delivery.
				 * In that case userspace tries to set a mode
				 * on a display which is in fact disconnected;
				 * dc_sink is NULL on the aconnector.
				 * We expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
9441 				 */
9442 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9443 						__func__, acrtc->base.base.id);
9444 				continue;
9445 			}
9446 
9447 			if (dm_old_crtc_state->stream)
9448 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9449 
9450 			pm_runtime_get_noresume(dev->dev);
9451 
9452 			acrtc->enabled = true;
9453 			acrtc->hw_mode = new_crtc_state->mode;
9454 			crtc->hwmode = new_crtc_state->mode;
9455 			mode_set_reset_required = true;
9456 		} else if (modereset_required(new_crtc_state)) {
9457 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9458 			/* i.e. reset mode */
9459 			if (dm_old_crtc_state->stream)
9460 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9461 
9462 			mode_set_reset_required = true;
9463 		}
9464 	} /* for_each_crtc_in_state() */
9465 
9466 	if (dc_state) {
		/* If there is a mode set or reset, disable eDP PSR. */
9468 		if (mode_set_reset_required) {
9469 #if defined(CONFIG_DRM_AMD_DC_DCN)
9470 			if (dm->vblank_control_workqueue)
9471 				flush_workqueue(dm->vblank_control_workqueue);
9472 #endif
9473 			amdgpu_dm_psr_disable_all(dm);
9474 		}
9475 
9476 		dm_enable_per_frame_crtc_master_sync(dc_state);
9477 		mutex_lock(&dm->dc_lock);
9478 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9479 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9483 #endif
9484 		mutex_unlock(&dm->dc_lock);
9485 	}
9486 
9487 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9488 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9489 
9490 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9491 
9492 		if (dm_new_crtc_state->stream != NULL) {
9493 			const struct dc_stream_status *status =
9494 					dc_stream_get_status(dm_new_crtc_state->stream);
9495 
9496 			if (!status)
9497 				status = dc_stream_get_status_from_state(dc_state,
9498 									 dm_new_crtc_state->stream);
9499 			if (!status)
9500 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9501 			else
9502 				acrtc->otg_inst = status->primary_otg_inst;
9503 		}
9504 	}
9505 #ifdef CONFIG_DRM_AMD_DC_HDCP
9506 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9507 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9508 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9509 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9510 
9511 		new_crtc_state = NULL;
9512 
9513 		if (acrtc)
9514 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9515 
9516 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9517 
9518 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9519 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9520 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9521 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9522 			dm_new_con_state->update_hdcp = true;
9523 			continue;
9524 		}
9525 
9526 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9527 			hdcp_update_display(
9528 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9529 				new_con_state->hdcp_content_type,
9530 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9531 	}
9532 #endif
9533 
9534 	/* Handle connector state changes */
9535 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9536 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9537 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9538 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9539 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9540 		struct dc_stream_update stream_update;
9541 		struct dc_info_packet hdr_packet;
9542 		struct dc_stream_status *status = NULL;
9543 		bool abm_changed, hdr_changed, scaling_changed;
9544 
9545 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9546 		memset(&stream_update, 0, sizeof(stream_update));
9547 
9548 		if (acrtc) {
9549 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9550 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9551 		}
9552 
9553 		/* Skip any modesets/resets */
9554 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9555 			continue;
9556 
9557 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9558 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9559 
9560 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9561 							     dm_old_con_state);
9562 
9563 		abm_changed = dm_new_crtc_state->abm_level !=
9564 			      dm_old_crtc_state->abm_level;
9565 
9566 		hdr_changed =
9567 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9568 
9569 		if (!scaling_changed && !abm_changed && !hdr_changed)
9570 			continue;
9571 
9572 		stream_update.stream = dm_new_crtc_state->stream;
9573 		if (scaling_changed) {
9574 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9575 					dm_new_con_state, dm_new_crtc_state->stream);
9576 
9577 			stream_update.src = dm_new_crtc_state->stream->src;
9578 			stream_update.dst = dm_new_crtc_state->stream->dst;
9579 		}
9580 
9581 		if (abm_changed) {
9582 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9583 
9584 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9585 		}
9586 
9587 		if (hdr_changed) {
9588 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9589 			stream_update.hdr_static_metadata = &hdr_packet;
9590 		}
9591 
9592 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9593 
9594 		if (WARN_ON(!status))
9595 			continue;
9596 
9597 		WARN_ON(!status->plane_count);
9598 
9599 		/*
9600 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9601 		 * Here we create an empty update on each plane.
9602 		 * To fix this, DC should permit updating only stream properties.
9603 		 */
9604 		for (j = 0; j < status->plane_count; j++)
9605 			dummy_updates[j].surface = status->plane_states[0];
9606 
9607 
9608 		mutex_lock(&dm->dc_lock);
9609 		dc_commit_updates_for_stream(dm->dc,
9610 						     dummy_updates,
9611 						     status->plane_count,
9612 						     dm_new_crtc_state->stream,
9613 						     &stream_update,
9614 						     dc_state);
9615 		mutex_unlock(&dm->dc_lock);
9616 	}
9617 
9618 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9619 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9620 				      new_crtc_state, i) {
9621 		if (old_crtc_state->active && !new_crtc_state->active)
9622 			crtc_disable_count++;
9623 
9624 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9625 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9626 
9627 		/* For freesync config update on crtc state and params for irq */
9628 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9629 
9630 		/* Handle vrr on->off / off->on transitions */
9631 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9632 						dm_new_crtc_state);
9633 	}
9634 
9635 	/**
9636 	 * Enable interrupts for CRTCs that are newly enabled or went through
9637 	 * a modeset. It was intentionally deferred until after the front end
9638 	 * state was modified to wait until the OTG was on and so the IRQ
9639 	 * handlers didn't access stale or invalid state.
9640 	 */
9641 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9642 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9643 #ifdef CONFIG_DEBUG_FS
9644 		bool configure_crc = false;
9645 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9646 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9647 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9648 #endif
9649 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9650 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9651 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9652 #endif
9653 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9654 
9655 		if (new_crtc_state->active &&
9656 		    (!old_crtc_state->active ||
9657 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9658 			dc_stream_retain(dm_new_crtc_state->stream);
9659 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9660 			manage_dm_interrupts(adev, acrtc, true);
9661 
9662 #ifdef CONFIG_DEBUG_FS
9663 			/**
9664 			 * Frontend may have changed so reapply the CRC capture
9665 			 * settings for the stream.
9666 			 */
9667 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9668 
9669 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9670 				configure_crc = true;
9671 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9672 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9673 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9674 					acrtc->dm_irq_params.crc_window.update_win = true;
9675 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9676 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9677 					crc_rd_wrk->crtc = crtc;
9678 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9679 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9680 				}
9681 #endif
9682 			}
9683 
9684 			if (configure_crc)
9685 				if (amdgpu_dm_crtc_configure_crc_source(
9686 					crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9688 #endif
9689 		}
9690 	}
9691 
9692 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9693 		if (new_crtc_state->async_flip)
9694 			wait_for_vblank = false;
9695 
9696 	/* update planes when needed per crtc*/
9697 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9698 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9699 
9700 		if (dm_new_crtc_state->stream)
9701 			amdgpu_dm_commit_planes(state, dc_state, dev,
9702 						dm, crtc, wait_for_vblank);
9703 	}
9704 
9705 	/* Update audio instances for each connector. */
9706 	amdgpu_dm_commit_audio(dev, state);
9707 
9708 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9709 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9710 	/* restore the backlight level */
9711 	for (i = 0; i < dm->num_of_edps; i++) {
9712 		if (dm->backlight_dev[i] &&
9713 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9714 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9715 	}
9716 #endif
9717 	/*
	 * Send a vblank event for all events not handled in the flip and
	 * mark the event as consumed for drm_atomic_helper_commit_hw_done().
9720 	 */
9721 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9722 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9723 
9724 		if (new_crtc_state->event)
9725 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9726 
9727 		new_crtc_state->event = NULL;
9728 	}
9729 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9730 
9731 	/* Signal HW programming completion */
9732 	drm_atomic_helper_commit_hw_done(state);
9733 
9734 	if (wait_for_vblank)
9735 		drm_atomic_helper_wait_for_flip_done(dev, state);
9736 
9737 	drm_atomic_helper_cleanup_planes(dev, state);
9738 
9739 	/* return the stolen vga memory back to VRAM */
9740 	if (!adev->mman.keep_stolen_vga_memory)
9741 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9742 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9743 
9744 	/*
9745 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9746 	 * so we can put the GPU into runtime suspend if we're not driving any
9747 	 * displays anymore
9748 	 */
9749 	for (i = 0; i < crtc_disable_count; i++)
9750 		pm_runtime_put_autosuspend(dev->dev);
9751 	pm_runtime_mark_last_busy(dev->dev);
9752 
9753 	if (dc_state_temp)
9754 		dc_release_state(dc_state_temp);
9755 }
9756 
9757 
9758 static int dm_force_atomic_commit(struct drm_connector *connector)
9759 {
9760 	int ret = 0;
9761 	struct drm_device *ddev = connector->dev;
9762 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9763 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9764 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9765 	struct drm_connector_state *conn_state;
9766 	struct drm_crtc_state *crtc_state;
9767 	struct drm_plane_state *plane_state;
9768 
9769 	if (!state)
9770 		return -ENOMEM;
9771 
9772 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9773 
9774 	/* Construct an atomic state to restore previous display setting */
9775 
9776 	/*
9777 	 * Attach connectors to drm_atomic_state
9778 	 */
9779 	conn_state = drm_atomic_get_connector_state(state, connector);
9780 
9781 	ret = PTR_ERR_OR_ZERO(conn_state);
9782 	if (ret)
9783 		goto out;
9784 
9785 	/* Attach crtc to drm_atomic_state*/
9786 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9787 
9788 	ret = PTR_ERR_OR_ZERO(crtc_state);
9789 	if (ret)
9790 		goto out;
9791 
9792 	/* force a restore */
9793 	crtc_state->mode_changed = true;
9794 
9795 	/* Attach plane to drm_atomic_state */
9796 	plane_state = drm_atomic_get_plane_state(state, plane);
9797 
9798 	ret = PTR_ERR_OR_ZERO(plane_state);
9799 	if (ret)
9800 		goto out;
9801 
9802 	/* Call commit internally with the state we just constructed */
9803 	ret = drm_atomic_commit(state);
9804 
9805 out:
9806 	drm_atomic_state_put(state);
9807 	if (ret)
9808 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9809 
9810 	return ret;
9811 }
9812 
9813 /*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
9817  */
9818 void dm_restore_drm_connector_state(struct drm_device *dev,
9819 				    struct drm_connector *connector)
9820 {
9821 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9822 	struct amdgpu_crtc *disconnected_acrtc;
9823 	struct dm_crtc_state *acrtc_state;
9824 
9825 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9826 		return;
9827 
9828 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9829 	if (!disconnected_acrtc)
9830 		return;
9831 
9832 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9833 	if (!acrtc_state->stream)
9834 		return;
9835 
9836 	/*
	 * If the previous sink has not been released and is different from the
	 * current one, we deduce that we are in a state where we cannot rely on
	 * a usermode call to turn on the display, so we do it here.
9840 	 */
9841 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9842 		dm_force_atomic_commit(&aconnector->base);
9843 }
9844 
9845 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
9848  */
9849 static int do_aquire_global_lock(struct drm_device *dev,
9850 				 struct drm_atomic_state *state)
9851 {
9852 	struct drm_crtc *crtc;
9853 	struct drm_crtc_commit *commit;
9854 	long ret;
9855 
9856 	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are locking here will get released too.
9860 	 */
9861 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9862 	if (ret)
9863 		return ret;
9864 
9865 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9866 		spin_lock(&crtc->commit_lock);
9867 		commit = list_first_entry_or_null(&crtc->commit_list,
9868 				struct drm_crtc_commit, commit_entry);
9869 		if (commit)
9870 			drm_crtc_commit_get(commit);
9871 		spin_unlock(&crtc->commit_lock);
9872 
9873 		if (!commit)
9874 			continue;
9875 
9876 		/*
9877 		 * Make sure all pending HW programming completed and
9878 		 * page flips done
9879 		 */
9880 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9881 
9882 		if (ret > 0)
9883 			ret = wait_for_completion_interruptible_timeout(
9884 					&commit->flip_done, 10*HZ);
9885 
9886 		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
9889 
9890 		drm_crtc_commit_put(commit);
9891 	}
9892 
9893 	return ret < 0 ? ret : 0;
9894 }
9895 
9896 static void get_freesync_config_for_crtc(
9897 	struct dm_crtc_state *new_crtc_state,
9898 	struct dm_connector_state *new_con_state)
9899 {
9900 	struct mod_freesync_config config = {0};
9901 	struct amdgpu_dm_connector *aconnector =
9902 			to_amdgpu_dm_connector(new_con_state->base.connector);
9903 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9904 	int vrefresh = drm_mode_vrefresh(mode);
9905 	bool fs_vid_mode = false;
9906 
9907 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9908 					vrefresh >= aconnector->min_vfreq &&
9909 					vrefresh <= aconnector->max_vfreq;
9910 
9911 	if (new_crtc_state->vrr_supported) {
9912 		new_crtc_state->stream->ignore_msa_timing_param = true;
9913 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9914 
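		/* The freesync module expects refresh rates in micro-Hz. */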
9915 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9916 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9917 		config.vsif_supported = true;
9918 		config.btr = true;
9919 
9920 		if (fs_vid_mode) {
9921 			config.state = VRR_STATE_ACTIVE_FIXED;
9922 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9923 			goto out;
9924 		} else if (new_crtc_state->base.vrr_enabled) {
9925 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9926 		} else {
9927 			config.state = VRR_STATE_INACTIVE;
9928 		}
9929 	}
9930 out:
9931 	new_crtc_state->freesync_config = config;
9932 }
9933 
9934 static void reset_freesync_config_for_crtc(
9935 	struct dm_crtc_state *new_crtc_state)
9936 {
9937 	new_crtc_state->vrr_supported = false;
9938 
9939 	memset(&new_crtc_state->vrr_infopacket, 0,
9940 	       sizeof(new_crtc_state->vrr_infopacket));
9941 }
9942 
9943 static bool
9944 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9945 				 struct drm_crtc_state *new_crtc_state)
9946 {
9947 	struct drm_display_mode old_mode, new_mode;
9948 
9949 	if (!old_crtc_state || !new_crtc_state)
9950 		return false;
9951 
9952 	old_mode = old_crtc_state->mode;
9953 	new_mode = new_crtc_state->mode;
9954 
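	/*
	 * Treat the timing as unchanged for freesync purposes when only the
	 * vertical total and vsync position have shifted while the pixel
	 * clock, horizontal timing and vsync pulse width are identical, i.e.
	 * the same base mode stretched to a different fixed refresh rate.
	 */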
9955 	if (old_mode.clock       == new_mode.clock &&
9956 	    old_mode.hdisplay    == new_mode.hdisplay &&
9957 	    old_mode.vdisplay    == new_mode.vdisplay &&
9958 	    old_mode.htotal      == new_mode.htotal &&
9959 	    old_mode.vtotal      != new_mode.vtotal &&
9960 	    old_mode.hsync_start == new_mode.hsync_start &&
9961 	    old_mode.vsync_start != new_mode.vsync_start &&
9962 	    old_mode.hsync_end   == new_mode.hsync_end &&
9963 	    old_mode.vsync_end   != new_mode.vsync_end &&
9964 	    old_mode.hskew       == new_mode.hskew &&
9965 	    old_mode.vscan       == new_mode.vscan &&
9966 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9967 	    (new_mode.vsync_end - new_mode.vsync_start))
9968 		return true;
9969 
9970 	return false;
9971 }
9972 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9974 	uint64_t num, den, res;
9975 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9976 
9977 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9978 
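	/*
	 * mode.clock is in kHz, so scale by 1000 to get Hz and by 1000000 to
	 * express the resulting refresh rate (clock / (htotal * vtotal)) in
	 * micro-Hz, which is what the freesync config expects.
	 */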
9979 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9980 	den = (unsigned long long)new_crtc_state->mode.htotal *
9981 	      (unsigned long long)new_crtc_state->mode.vtotal;
9982 
9983 	res = div_u64(num, den);
9984 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9985 }
9986 
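/*
 * Validate and update the dc_stream for one CRTC during atomic check. On the
 * disable pass (enable == false) the old stream is removed from the DC
 * context; on the enable pass a new stream is created, validated against the
 * connector state and added to the context. *lock_and_validation_needed is
 * set whenever the DC context was modified, so that global DC validation runs
 * later in amdgpu_dm_atomic_check().
 */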
9987 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9988 				struct drm_atomic_state *state,
9989 				struct drm_crtc *crtc,
9990 				struct drm_crtc_state *old_crtc_state,
9991 				struct drm_crtc_state *new_crtc_state,
9992 				bool enable,
9993 				bool *lock_and_validation_needed)
9994 {
9995 	struct dm_atomic_state *dm_state = NULL;
9996 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9997 	struct dc_stream_state *new_stream;
9998 	int ret = 0;
9999 
10000 	/*
10001 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10002 	 * update changed items
10003 	 */
10004 	struct amdgpu_crtc *acrtc = NULL;
10005 	struct amdgpu_dm_connector *aconnector = NULL;
10006 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10007 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10008 
10009 	new_stream = NULL;
10010 
10011 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10012 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10013 	acrtc = to_amdgpu_crtc(crtc);
10014 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10015 
10016 	/* TODO This hack should go away */
10017 	if (aconnector && enable) {
10018 		/* Make sure fake sink is created in plug-in scenario */
10019 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10020 							    &aconnector->base);
10021 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10022 							    &aconnector->base);
10023 
10024 		if (IS_ERR(drm_new_conn_state)) {
10025 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10026 			goto fail;
10027 		}
10028 
10029 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10030 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10031 
10032 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10033 			goto skip_modeset;
10034 
10035 		new_stream = create_validate_stream_for_sink(aconnector,
10036 							     &new_crtc_state->mode,
10037 							     dm_new_conn_state,
10038 							     dm_old_crtc_state->stream);
10039 
10040 		/*
10041 		 * We can have no stream on ACTION_SET if a display
10042 		 * was disconnected during S3; in this case it is not an
10043 		 * error, the OS will be updated after detection and
10044 		 * will do the right thing on the next atomic commit.
10045 		 */
10046 
10047 		if (!new_stream) {
10048 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10049 					__func__, acrtc->base.base.id);
10050 			ret = -ENOMEM;
10051 			goto fail;
10052 		}
10053 
10054 		/*
10055 		 * TODO: Check VSDB bits to decide whether this should
10056 		 * be enabled or not.
10057 		 */
10058 		new_stream->triggered_crtc_reset.enabled =
10059 			dm->force_timing_sync;
10060 
10061 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10062 
10063 		ret = fill_hdr_info_packet(drm_new_conn_state,
10064 					   &new_stream->hdr_static_metadata);
10065 		if (ret)
10066 			goto fail;
10067 
10068 		/*
10069 		 * If we already removed the old stream from the context
10070 		 * (and set the new stream to NULL) then we can't reuse
10071 		 * the old stream even if the stream and scaling are unchanged.
10072 		 * We'll hit the BUG_ON and black screen.
10073 		 *
10074 		 * TODO: Refactor this function to allow this check to work
10075 		 * in all conditions.
10076 		 */
10077 		if (amdgpu_freesync_vid_mode &&
10078 		    dm_new_crtc_state->stream &&
10079 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10080 			goto skip_modeset;
10081 
10082 		if (dm_new_crtc_state->stream &&
10083 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10084 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10085 			new_crtc_state->mode_changed = false;
10086 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10087 					 new_crtc_state->mode_changed);
10088 		}
10089 	}
10090 
10091 	/* mode_changed flag may get updated above, need to check again */
10092 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10093 		goto skip_modeset;
10094 
10095 	DRM_DEBUG_ATOMIC(
10096 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10097 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
10098 		"connectors_changed:%d\n",
10099 		acrtc->crtc_id,
10100 		new_crtc_state->enable,
10101 		new_crtc_state->active,
10102 		new_crtc_state->planes_changed,
10103 		new_crtc_state->mode_changed,
10104 		new_crtc_state->active_changed,
10105 		new_crtc_state->connectors_changed);
10106 
10107 	/* Remove stream for any changed/disabled CRTC */
10108 	if (!enable) {
10109 
10110 		if (!dm_old_crtc_state->stream)
10111 			goto skip_modeset;
10112 
10113 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10114 		    is_timing_unchanged_for_freesync(new_crtc_state,
10115 						     old_crtc_state)) {
10116 			new_crtc_state->mode_changed = false;
10117 			DRM_DEBUG_DRIVER(
10118 				"Mode change not required for front porch change, "
10119 				"setting mode_changed to %d",
10120 				new_crtc_state->mode_changed);
10121 
10122 			set_freesync_fixed_config(dm_new_crtc_state);
10123 
10124 			goto skip_modeset;
10125 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10126 			   is_freesync_video_mode(&new_crtc_state->mode,
10127 						  aconnector)) {
10128 			struct drm_display_mode *high_mode;
10129 
10130 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10131 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10132 				set_freesync_fixed_config(dm_new_crtc_state);
10133 			}
10134 		}
10135 
10136 		ret = dm_atomic_get_state(state, &dm_state);
10137 		if (ret)
10138 			goto fail;
10139 
10140 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10141 				crtc->base.id);
10142 
10143 		/* i.e. reset mode */
10144 		if (dc_remove_stream_from_ctx(
10145 				dm->dc,
10146 				dm_state->context,
10147 				dm_old_crtc_state->stream) != DC_OK) {
10148 			ret = -EINVAL;
10149 			goto fail;
10150 		}
10151 
10152 		dc_stream_release(dm_old_crtc_state->stream);
10153 		dm_new_crtc_state->stream = NULL;
10154 
10155 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10156 
10157 		*lock_and_validation_needed = true;
10158 
10159 	} else { /* Add stream for any updated/enabled CRTC */
10160 		/*
10161 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
10162 		 * added MST connectors are not found in the existing crtc_state in chained mode.
10163 		 * TODO: need to dig out the root cause of that.
10164 		 */
10165 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10166 			goto skip_modeset;
10167 
10168 		if (modereset_required(new_crtc_state))
10169 			goto skip_modeset;
10170 
10171 		if (modeset_required(new_crtc_state, new_stream,
10172 				     dm_old_crtc_state->stream)) {
10173 
10174 			WARN_ON(dm_new_crtc_state->stream);
10175 
10176 			ret = dm_atomic_get_state(state, &dm_state);
10177 			if (ret)
10178 				goto fail;
10179 
10180 			dm_new_crtc_state->stream = new_stream;
10181 
10182 			dc_stream_retain(new_stream);
10183 
10184 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10185 					 crtc->base.id);
10186 
10187 			if (dc_add_stream_to_ctx(
10188 					dm->dc,
10189 					dm_state->context,
10190 					dm_new_crtc_state->stream) != DC_OK) {
10191 				ret = -EINVAL;
10192 				goto fail;
10193 			}
10194 
10195 			*lock_and_validation_needed = true;
10196 		}
10197 	}
10198 
10199 skip_modeset:
10200 	/* Release extra reference */
10201 	if (new_stream)
10202 		dc_stream_release(new_stream);
10203 
10204 	/*
10205 	 * We want to do dc stream updates that do not require a
10206 	 * full modeset below.
10207 	 */
10208 	if (!(enable && aconnector && new_crtc_state->active))
10209 		return 0;
10210 	/*
10211 	 * Given above conditions, the dc state cannot be NULL because:
10212 	 * 1. We're in the process of enabling CRTCs (just been added
10213 	 *    to the dc context, or already is on the context)
10214 	 * 2. Has a valid connector attached, and
10215 	 * 3. Is currently active and enabled.
10216 	 * => The dc stream state currently exists.
10217 	 */
10218 	BUG_ON(dm_new_crtc_state->stream == NULL);
10219 
10220 	/* Scaling or underscan settings */
10221 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10222 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10223 		update_stream_scaling_settings(
10224 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10225 
10226 	/* ABM settings */
10227 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10228 
10229 	/*
10230 	 * Color management settings. We also update color properties
10231 	 * when a modeset is needed, to ensure it gets reprogrammed.
10232 	 */
10233 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10234 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10235 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10236 		if (ret)
10237 			goto fail;
10238 	}
10239 
10240 	/* Update Freesync settings. */
10241 	get_freesync_config_for_crtc(dm_new_crtc_state,
10242 				     dm_new_conn_state);
10243 
10244 	return ret;
10245 
10246 fail:
10247 	if (new_stream)
10248 		dc_stream_release(new_stream);
10249 	return ret;
10250 }
10251 
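/*
 * Decide whether a plane update requires removing and re-adding every plane
 * on the stream. Any modeset, CRTC color-management change, or a change in
 * another non-cursor plane's size, scaling, rotation, blending, alpha,
 * colorspace, pixel format or tiling on the same CRTC forces a full reset.
 */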
10252 static bool should_reset_plane(struct drm_atomic_state *state,
10253 			       struct drm_plane *plane,
10254 			       struct drm_plane_state *old_plane_state,
10255 			       struct drm_plane_state *new_plane_state)
10256 {
10257 	struct drm_plane *other;
10258 	struct drm_plane_state *old_other_state, *new_other_state;
10259 	struct drm_crtc_state *new_crtc_state;
10260 	int i;
10261 
10262 	/*
10263 	 * TODO: Remove this hack once the checks below are sufficient
10264 	 * to determine when we need to reset all the planes on
10265 	 * the stream.
10266 	 */
10267 	if (state->allow_modeset)
10268 		return true;
10269 
10270 	/* Exit early if we know that we're adding or removing the plane. */
10271 	if (old_plane_state->crtc != new_plane_state->crtc)
10272 		return true;
10273 
10274 	/* old crtc == new_crtc == NULL, plane not in context. */
10275 	if (!new_plane_state->crtc)
10276 		return false;
10277 
10278 	new_crtc_state =
10279 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10280 
10281 	if (!new_crtc_state)
10282 		return true;
10283 
10284 	/* CRTC Degamma changes currently require us to recreate planes. */
10285 	if (new_crtc_state->color_mgmt_changed)
10286 		return true;
10287 
10288 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10289 		return true;
10290 
10291 	/*
10292 	 * If there are any new primary or overlay planes being added or
10293 	 * removed then the z-order can potentially change. To ensure
10294 	 * correct z-order and pipe acquisition the current DC architecture
10295 	 * requires us to remove and recreate all existing planes.
10296 	 *
10297 	 * TODO: Come up with a more elegant solution for this.
10298 	 */
10299 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10300 		struct amdgpu_framebuffer *old_afb, *new_afb;
10301 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10302 			continue;
10303 
10304 		if (old_other_state->crtc != new_plane_state->crtc &&
10305 		    new_other_state->crtc != new_plane_state->crtc)
10306 			continue;
10307 
10308 		if (old_other_state->crtc != new_other_state->crtc)
10309 			return true;
10310 
10311 		/* Src/dst size and scaling updates. */
10312 		if (old_other_state->src_w != new_other_state->src_w ||
10313 		    old_other_state->src_h != new_other_state->src_h ||
10314 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10315 		    old_other_state->crtc_h != new_other_state->crtc_h)
10316 			return true;
10317 
10318 		/* Rotation / mirroring updates. */
10319 		if (old_other_state->rotation != new_other_state->rotation)
10320 			return true;
10321 
10322 		/* Blending updates. */
10323 		if (old_other_state->pixel_blend_mode !=
10324 		    new_other_state->pixel_blend_mode)
10325 			return true;
10326 
10327 		/* Alpha updates. */
10328 		if (old_other_state->alpha != new_other_state->alpha)
10329 			return true;
10330 
10331 		/* Colorspace changes. */
10332 		if (old_other_state->color_range != new_other_state->color_range ||
10333 		    old_other_state->color_encoding != new_other_state->color_encoding)
10334 			return true;
10335 
10336 		/* Framebuffer checks fall at the end. */
10337 		if (!old_other_state->fb || !new_other_state->fb)
10338 			continue;
10339 
10340 		/* Pixel format changes can require bandwidth updates. */
10341 		if (old_other_state->fb->format != new_other_state->fb->format)
10342 			return true;
10343 
10344 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10345 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10346 
10347 		/* Tiling and DCC changes also require bandwidth updates. */
10348 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10349 		    old_afb->base.modifier != new_afb->base.modifier)
10350 			return true;
10351 	}
10352 
10353 	return false;
10354 }
10355 
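/*
 * Validate a framebuffer for the hardware cursor: it must fit within the
 * CRTC's maximum cursor dimensions, must not be cropped or scaled, must have
 * a pitch of 64, 128 or 256 pixels, and (when no format modifier is supplied)
 * must use a linear tiling layout.
 */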
10356 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10357 			      struct drm_plane_state *new_plane_state,
10358 			      struct drm_framebuffer *fb)
10359 {
10360 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10361 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10362 	unsigned int pitch;
10363 	bool linear;
10364 
10365 	if (fb->width > new_acrtc->max_cursor_width ||
10366 	    fb->height > new_acrtc->max_cursor_height) {
10367 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10368 				 new_plane_state->fb->width,
10369 				 new_plane_state->fb->height);
10370 		return -EINVAL;
10371 	}
10372 	if (new_plane_state->src_w != fb->width << 16 ||
10373 	    new_plane_state->src_h != fb->height << 16) {
10374 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10375 		return -EINVAL;
10376 	}
10377 
10378 	/* Pitch in pixels */
10379 	pitch = fb->pitches[0] / fb->format->cpp[0];
10380 
10381 	if (fb->width != pitch) {
10382 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10383 				 fb->width, pitch);
10384 		return -EINVAL;
10385 	}
10386 
10387 	switch (pitch) {
10388 	case 64:
10389 	case 128:
10390 	case 256:
10391 		/* FB pitch is supported by cursor plane */
10392 		break;
10393 	default:
10394 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10395 		return -EINVAL;
10396 	}
10397 
10398 	/* Core DRM takes care of checking FB modifiers, so we only need to
10399 	 * check tiling flags when the FB doesn't have a modifier. */
10400 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10401 		if (adev->family < AMDGPU_FAMILY_AI) {
10402 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10403 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10404 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10405 		} else {
10406 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10407 		}
10408 		if (!linear) {
10409 			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10410 			return -EINVAL;
10411 		}
10412 	}
10413 
10414 	return 0;
10415 }
10416 
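/*
 * Plane counterpart of dm_update_crtc_state(): cursor planes are only
 * position- and FB-checked, while primary/overlay planes are removed from or
 * added to the DC context depending on the enable pass. Adding a plane always
 * requests a full surface update and flags the commit for global validation.
 */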
10417 static int dm_update_plane_state(struct dc *dc,
10418 				 struct drm_atomic_state *state,
10419 				 struct drm_plane *plane,
10420 				 struct drm_plane_state *old_plane_state,
10421 				 struct drm_plane_state *new_plane_state,
10422 				 bool enable,
10423 				 bool *lock_and_validation_needed)
10424 {
10425 
10426 	struct dm_atomic_state *dm_state = NULL;
10427 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10428 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10429 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10430 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10431 	struct amdgpu_crtc *new_acrtc;
10432 	bool needs_reset;
10433 	int ret = 0;
10434 
10435 
10436 	new_plane_crtc = new_plane_state->crtc;
10437 	old_plane_crtc = old_plane_state->crtc;
10438 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10439 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10440 
10441 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10442 		if (!enable || !new_plane_crtc ||
10443 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10444 			return 0;
10445 
10446 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10447 
10448 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10449 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10450 			return -EINVAL;
10451 		}
10452 
10453 		if (new_plane_state->fb) {
10454 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10455 						 new_plane_state->fb);
10456 			if (ret)
10457 				return ret;
10458 		}
10459 
10460 		return 0;
10461 	}
10462 
10463 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10464 					 new_plane_state);
10465 
10466 	/* Remove any changed/removed planes */
10467 	if (!enable) {
10468 		if (!needs_reset)
10469 			return 0;
10470 
10471 		if (!old_plane_crtc)
10472 			return 0;
10473 
10474 		old_crtc_state = drm_atomic_get_old_crtc_state(
10475 				state, old_plane_crtc);
10476 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10477 
10478 		if (!dm_old_crtc_state->stream)
10479 			return 0;
10480 
10481 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10482 				plane->base.id, old_plane_crtc->base.id);
10483 
10484 		ret = dm_atomic_get_state(state, &dm_state);
10485 		if (ret)
10486 			return ret;
10487 
10488 		if (!dc_remove_plane_from_context(
10489 				dc,
10490 				dm_old_crtc_state->stream,
10491 				dm_old_plane_state->dc_state,
10492 				dm_state->context)) {
10493 
10494 			return -EINVAL;
10495 		}
10496 
10497 
10498 		dc_plane_state_release(dm_old_plane_state->dc_state);
10499 		dm_new_plane_state->dc_state = NULL;
10500 
10501 		*lock_and_validation_needed = true;
10502 
10503 	} else { /* Add new planes */
10504 		struct dc_plane_state *dc_new_plane_state;
10505 
10506 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10507 			return 0;
10508 
10509 		if (!new_plane_crtc)
10510 			return 0;
10511 
10512 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10513 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10514 
10515 		if (!dm_new_crtc_state->stream)
10516 			return 0;
10517 
10518 		if (!needs_reset)
10519 			return 0;
10520 
10521 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10522 		if (ret)
10523 			return ret;
10524 
10525 		WARN_ON(dm_new_plane_state->dc_state);
10526 
10527 		dc_new_plane_state = dc_create_plane_state(dc);
10528 		if (!dc_new_plane_state)
10529 			return -ENOMEM;
10530 
10531 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10532 				 plane->base.id, new_plane_crtc->base.id);
10533 
10534 		ret = fill_dc_plane_attributes(
10535 			drm_to_adev(new_plane_crtc->dev),
10536 			dc_new_plane_state,
10537 			new_plane_state,
10538 			new_crtc_state);
10539 		if (ret) {
10540 			dc_plane_state_release(dc_new_plane_state);
10541 			return ret;
10542 		}
10543 
10544 		ret = dm_atomic_get_state(state, &dm_state);
10545 		if (ret) {
10546 			dc_plane_state_release(dc_new_plane_state);
10547 			return ret;
10548 		}
10549 
10550 		/*
10551 		 * Any atomic check errors that occur after this will
10552 		 * not need a release. The plane state will be attached
10553 		 * to the stream, and therefore part of the atomic
10554 		 * state. It'll be released when the atomic state is
10555 		 * cleaned.
10556 		 */
10557 		if (!dc_add_plane_to_context(
10558 				dc,
10559 				dm_new_crtc_state->stream,
10560 				dc_new_plane_state,
10561 				dm_state->context)) {
10562 
10563 			dc_plane_state_release(dc_new_plane_state);
10564 			return -EINVAL;
10565 		}
10566 
10567 		dm_new_plane_state->dc_state = dc_new_plane_state;
10568 
10569 		/* Tell DC to do a full surface update every time there
10570 		 * is a plane change. Inefficient, but works for now.
10571 		 */
10572 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10573 
10574 		*lock_and_validation_needed = true;
10575 	}
10576 
10577 
10578 	return ret;
10579 }
10580 
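/*
 * Reject cursor updates whose implied scaling does not match the plane(s)
 * underneath. Scale factors are compared in thousandths: a 64x64 cursor FB
 * shown at 64x64 has a scale of 1000, so it only passes on CRTCs whose
 * underlying planes are unscaled or scaled by the same factor.
 */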
10581 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10582 				struct drm_crtc *crtc,
10583 				struct drm_crtc_state *new_crtc_state)
10584 {
10585 	struct drm_plane *cursor = crtc->cursor, *underlying;
10586 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10587 	int i;
10588 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10589 
10590 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10591 	 * cursor per pipe but it's going to inherit the scaling and
10592 	 * positioning from the underlying pipe. Check that the cursor plane's
10593 	 * scaling matches the underlying planes'. */
10594 
10595 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10596 	if (!new_cursor_state || !new_cursor_state->fb) {
10597 		return 0;
10598 	}
10599 
10600 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10601 			 (new_cursor_state->src_w >> 16);
10602 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10603 			 (new_cursor_state->src_h >> 16);
10604 
10605 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10606 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10607 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10608 			continue;
10609 
10610 		/* Ignore disabled planes */
10611 		if (!new_underlying_state->fb)
10612 			continue;
10613 
10614 		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10615 				     (new_underlying_state->src_w >> 16);
10616 		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10617 				     (new_underlying_state->src_h >> 16);
10618 
10619 		if (cursor_scale_w != underlying_scale_w ||
10620 		    cursor_scale_h != underlying_scale_h) {
10621 			drm_dbg_atomic(crtc->dev,
10622 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10623 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10624 			return -EINVAL;
10625 		}
10626 
10627 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10628 		if (new_underlying_state->crtc_x <= 0 &&
10629 		    new_underlying_state->crtc_y <= 0 &&
10630 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10631 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10632 			break;
10633 	}
10634 
10635 	return 0;
10636 }
10637 
10638 #if defined(CONFIG_DRM_AMD_DC_DCN)
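/*
 * When a CRTC driving an MST connector needs a modeset, DSC/bandwidth may
 * have to be recomputed for every stream sharing the same MST topology, so
 * pull all CRTCs on that topology manager into the atomic state.
 */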
10639 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10640 {
10641 	struct drm_connector *connector;
10642 	struct drm_connector_state *conn_state;
10643 	struct amdgpu_dm_connector *aconnector = NULL;
10644 	int i;
10645 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10646 		if (conn_state->crtc != crtc)
10647 			continue;
10648 
10649 		aconnector = to_amdgpu_dm_connector(connector);
10650 		if (!aconnector->port || !aconnector->mst_port)
10651 			aconnector = NULL;
10652 		else
10653 			break;
10654 	}
10655 
10656 	if (!aconnector)
10657 		return 0;
10658 
10659 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10660 }
10661 #endif
10662 
10663 /**
10664  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10665  * @dev: The DRM device
10666  * @state: The atomic state to commit
10667  *
10668  * Validate that the given atomic state is programmable by DC into hardware.
10669  * This involves constructing a &struct dc_state reflecting the new hardware
10670  * state we wish to commit, then querying DC to see if it is programmable. It's
10671  * important not to modify the existing DC state. Otherwise, atomic_check
10672  * may unexpectedly commit hardware changes.
10673  *
10674  * When validating the DC state, it's important that the right locks are
10675  * acquired. For full updates case which removes/adds/updates streams on one
10676  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10677  * that any such full update commit will wait for completion of any outstanding
10678  * flip using DRMs synchronization events.
10679  *
10680  * Note that DM adds the affected connectors for all CRTCs in state, when that
10681  * might not seem necessary. This is because DC stream creation requires the
10682  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10683  * be possible but non-trivial - a possible TODO item.
10684  *
10685  * Return: -Error code if validation failed.
10686  * Return: 0 on success, negative error code if validation failed.
10687 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10688 				  struct drm_atomic_state *state)
10689 {
10690 	struct amdgpu_device *adev = drm_to_adev(dev);
10691 	struct dm_atomic_state *dm_state = NULL;
10692 	struct dc *dc = adev->dm.dc;
10693 	struct drm_connector *connector;
10694 	struct drm_connector_state *old_con_state, *new_con_state;
10695 	struct drm_crtc *crtc;
10696 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10697 	struct drm_plane *plane;
10698 	struct drm_plane_state *old_plane_state, *new_plane_state;
10699 	enum dc_status status;
10700 	int ret, i;
10701 	bool lock_and_validation_needed = false;
10702 	struct dm_crtc_state *dm_old_crtc_state;
10703 #if defined(CONFIG_DRM_AMD_DC_DCN)
10704 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10705 	struct drm_dp_mst_topology_state *mst_state;
10706 	struct drm_dp_mst_topology_mgr *mgr;
10707 #endif
10708 
10709 	trace_amdgpu_dm_atomic_check_begin(state);
10710 
10711 	ret = drm_atomic_helper_check_modeset(dev, state);
10712 	if (ret)
10713 		goto fail;
10714 
10715 	/* Check connector changes */
10716 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10717 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10718 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10719 
10720 		/* Skip connectors that are disabled or part of modeset already. */
10721 		if (!old_con_state->crtc && !new_con_state->crtc)
10722 			continue;
10723 
10724 		if (!new_con_state->crtc)
10725 			continue;
10726 
10727 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10728 		if (IS_ERR(new_crtc_state)) {
10729 			ret = PTR_ERR(new_crtc_state);
10730 			goto fail;
10731 		}
10732 
10733 		if (dm_old_con_state->abm_level !=
10734 		    dm_new_con_state->abm_level)
10735 			new_crtc_state->connectors_changed = true;
10736 	}
10737 
10738 #if defined(CONFIG_DRM_AMD_DC_DCN)
10739 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10740 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10741 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10742 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10743 				if (ret)
10744 					goto fail;
10745 			}
10746 		}
10747 	}
10748 #endif
10749 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10750 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10751 
10752 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10753 		    !new_crtc_state->color_mgmt_changed &&
10754 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10755 			dm_old_crtc_state->dsc_force_changed == false)
10756 			continue;
10757 
10758 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10759 		if (ret)
10760 			goto fail;
10761 
10762 		if (!new_crtc_state->enable)
10763 			continue;
10764 
10765 		ret = drm_atomic_add_affected_connectors(state, crtc);
10766 		if (ret)
10767 			return ret;
10768 
10769 		ret = drm_atomic_add_affected_planes(state, crtc);
10770 		if (ret)
10771 			goto fail;
10772 
10773 		if (dm_old_crtc_state->dsc_force_changed)
10774 			new_crtc_state->mode_changed = true;
10775 	}
10776 
10777 	/*
10778 	 * Add all primary and overlay planes on the CRTC to the state
10779 	 * whenever a plane is enabled to maintain correct z-ordering
10780 	 * and to enable fast surface updates.
10781 	 */
10782 	drm_for_each_crtc(crtc, dev) {
10783 		bool modified = false;
10784 
10785 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10786 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10787 				continue;
10788 
10789 			if (new_plane_state->crtc == crtc ||
10790 			    old_plane_state->crtc == crtc) {
10791 				modified = true;
10792 				break;
10793 			}
10794 		}
10795 
10796 		if (!modified)
10797 			continue;
10798 
10799 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10800 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10801 				continue;
10802 
10803 			new_plane_state =
10804 				drm_atomic_get_plane_state(state, plane);
10805 
10806 			if (IS_ERR(new_plane_state)) {
10807 				ret = PTR_ERR(new_plane_state);
10808 				goto fail;
10809 			}
10810 		}
10811 	}
10812 
10813 	/* Remove existing planes if they are modified */
10814 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10815 		ret = dm_update_plane_state(dc, state, plane,
10816 					    old_plane_state,
10817 					    new_plane_state,
10818 					    false,
10819 					    &lock_and_validation_needed);
10820 		if (ret)
10821 			goto fail;
10822 	}
10823 
10824 	/* Disable all crtcs which require disable */
10825 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10826 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10827 					   old_crtc_state,
10828 					   new_crtc_state,
10829 					   false,
10830 					   &lock_and_validation_needed);
10831 		if (ret)
10832 			goto fail;
10833 	}
10834 
10835 	/* Enable all crtcs which require enable */
10836 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10837 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10838 					   old_crtc_state,
10839 					   new_crtc_state,
10840 					   true,
10841 					   &lock_and_validation_needed);
10842 		if (ret)
10843 			goto fail;
10844 	}
10845 
10846 	/* Add new/modified planes */
10847 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10848 		ret = dm_update_plane_state(dc, state, plane,
10849 					    old_plane_state,
10850 					    new_plane_state,
10851 					    true,
10852 					    &lock_and_validation_needed);
10853 		if (ret)
10854 			goto fail;
10855 	}
10856 
10857 	/* Run this here since we want to validate the streams we created */
10858 	ret = drm_atomic_helper_check_planes(dev, state);
10859 	if (ret)
10860 		goto fail;
10861 
10862 	/* Check cursor planes scaling */
10863 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10864 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10865 		if (ret)
10866 			goto fail;
10867 	}
10868 
10869 	if (state->legacy_cursor_update) {
10870 		/*
10871 		 * This is a fast cursor update coming from the plane update
10872 		 * helper, check if it can be done asynchronously for better
10873 		 * performance.
10874 		 */
10875 		state->async_update =
10876 			!drm_atomic_helper_async_check(dev, state);
10877 
10878 		/*
10879 		 * Skip the remaining global validation if this is an async
10880 		 * update. Cursor updates can be done without affecting
10881 		 * state or bandwidth calcs and this avoids the performance
10882 		 * penalty of locking the private state object and
10883 		 * allocating a new dc_state.
10884 		 */
10885 		if (state->async_update)
10886 			return 0;
10887 	}
10888 
10889 	/* Check scaling and underscan changes */
10890 	/* TODO Removed scaling changes validation due to inability to commit
10891 	 * new stream into context w/o causing full reset. Need to
10892 	 * decide how to handle.
10893 	 */
10894 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10895 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10896 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10897 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10898 
10899 		/* Skip any modesets/resets */
10900 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10901 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10902 			continue;
10903 
10904 		/* Skip anything that is not a scaling or underscan change */
10905 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10906 			continue;
10907 
10908 		lock_and_validation_needed = true;
10909 	}
10910 
10911 #if defined(CONFIG_DRM_AMD_DC_DCN)
10912 	/* set the slot info for each mst_state based on the link encoding format */
10913 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10914 		struct amdgpu_dm_connector *aconnector;
10915 		struct drm_connector *connector;
10916 		struct drm_connector_list_iter iter;
10917 		u8 link_coding_cap;
10918 
10919 		if (!mgr->mst_state)
10920 			continue;
10921 
10922 		drm_connector_list_iter_begin(dev, &iter);
10923 		drm_for_each_connector_iter(connector, &iter) {
10924 			int id = connector->index;
10925 
10926 			if (id == mst_state->mgr->conn_base_id) {
10927 				aconnector = to_amdgpu_dm_connector(connector);
10928 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10929 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
10930 
10931 				break;
10932 			}
10933 		}
10934 		drm_connector_list_iter_end(&iter);
10935 
10936 	}
10937 #endif
10938 	/*
10939 	 * Streams and planes are reset when there are changes that affect
10940 	 * bandwidth. Anything that affects bandwidth needs to go through
10941 	 * DC global validation to ensure that the configuration can be applied
10942 	 * to hardware.
10943 	 *
10944 	 * We have to currently stall out here in atomic_check for outstanding
10945 	 * commits to finish in this case because our IRQ handlers reference
10946 	 * DRM state directly - we can end up disabling interrupts too early
10947 	 * if we don't.
10948 	 *
10949 	 * TODO: Remove this stall and drop DM state private objects.
10950 	 */
10951 	if (lock_and_validation_needed) {
10952 		ret = dm_atomic_get_state(state, &dm_state);
10953 		if (ret)
10954 			goto fail;
10955 
10956 		ret = do_aquire_global_lock(dev, state);
10957 		if (ret)
10958 			goto fail;
10959 
10960 #if defined(CONFIG_DRM_AMD_DC_DCN)
10961 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
10962 			goto fail;
10963 
10964 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10965 		if (ret)
10966 			goto fail;
10967 #endif
10968 
10969 		/*
10970 		 * Perform validation of MST topology in the state:
10971 		 * We need to perform MST atomic check before calling
10972 		 * dc_validate_global_state(), or there is a chance
10973 		 * to get stuck in an infinite loop and hang eventually.
10974 		 */
10975 		ret = drm_dp_mst_atomic_check(state);
10976 		if (ret)
10977 			goto fail;
10978 		status = dc_validate_global_state(dc, dm_state->context, false);
10979 		if (status != DC_OK) {
10980 			drm_dbg_atomic(dev,
10981 				       "DC global validation failure: %s (%d)",
10982 				       dc_status_to_str(status), status);
10983 			ret = -EINVAL;
10984 			goto fail;
10985 		}
10986 	} else {
10987 		/*
10988 		 * The commit is a fast update. Fast updates shouldn't change
10989 		 * the DC context, affect global validation, and can have their
10990 		 * commit work done in parallel with other commits not touching
10991 		 * the same resource. If we have a new DC context as part of
10992 		 * the DM atomic state from validation we need to free it and
10993 		 * retain the existing one instead.
10994 		 *
10995 		 * Furthermore, since the DM atomic state only contains the DC
10996 		 * context and can safely be annulled, we can free the state
10997 		 * and clear the associated private object now to free
10998 		 * some memory and avoid a possible use-after-free later.
10999 		 */
11000 
11001 		for (i = 0; i < state->num_private_objs; i++) {
11002 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11003 
11004 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11005 				int j = state->num_private_objs-1;
11006 
11007 				dm_atomic_destroy_state(obj,
11008 						state->private_objs[i].state);
11009 
11010 				/* If i is not at the end of the array then the
11011 				 * last element needs to be moved to where i was
11012 				 * before the array can safely be truncated.
11013 				 */
11014 				if (i != j)
11015 					state->private_objs[i] =
11016 						state->private_objs[j];
11017 
11018 				state->private_objs[j].ptr = NULL;
11019 				state->private_objs[j].state = NULL;
11020 				state->private_objs[j].old_state = NULL;
11021 				state->private_objs[j].new_state = NULL;
11022 
11023 				state->num_private_objs = j;
11024 				break;
11025 			}
11026 		}
11027 	}
11028 
11029 	/* Store the overall update type for use later in atomic check. */
11030 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11031 		struct dm_crtc_state *dm_new_crtc_state =
11032 			to_dm_crtc_state(new_crtc_state);
11033 
11034 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11035 							 UPDATE_TYPE_FULL :
11036 							 UPDATE_TYPE_FAST;
11037 	}
11038 
11039 	/* Must be success */
11040 	WARN_ON(ret);
11041 
11042 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11043 
11044 	return ret;
11045 
11046 fail:
11047 	if (ret == -EDEADLK)
11048 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11049 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11050 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11051 	else
11052 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11053 
11054 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11055 
11056 	return ret;
11057 }
11058 
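/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether the
 * DP_MSA_TIMING_PAR_IGNORED bit is set, i.e. whether the sink can ignore the
 * MSA timing parameters. DM only trusts the EDID monitor range descriptor for
 * FreeSync on DP/eDP sinks that advertise this capability.
 */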
11059 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11060 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11061 {
11062 	uint8_t dpcd_data;
11063 	bool capable = false;
11064 
11065 	if (amdgpu_dm_connector->dc_link &&
11066 		dm_helpers_dp_read_dpcd(
11067 				NULL,
11068 				amdgpu_dm_connector->dc_link,
11069 				DP_DOWN_STREAM_PORT_COUNT,
11070 				&dpcd_data,
11071 				sizeof(dpcd_data))) {
11072 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11073 	}
11074 
11075 	return capable;
11076 }
11077 
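/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA extension
 * block to DMUB with the EDID_CEA command and interpret the reply: either an
 * ACK for the chunk or the parsed AMD VSDB contents (FreeSync support and
 * min/max frame rate), which are copied into *vsdb.
 */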
11078 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11079 		unsigned int offset,
11080 		unsigned int total_length,
11081 		uint8_t *data,
11082 		unsigned int length,
11083 		struct amdgpu_hdmi_vsdb_info *vsdb)
11084 {
11085 	bool res;
11086 	union dmub_rb_cmd cmd;
11087 	struct dmub_cmd_send_edid_cea *input;
11088 	struct dmub_cmd_edid_cea_output *output;
11089 
11090 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11091 		return false;
11092 
11093 	memset(&cmd, 0, sizeof(cmd));
11094 
11095 	input = &cmd.edid_cea.data.input;
11096 
11097 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11098 	cmd.edid_cea.header.sub_type = 0;
11099 	cmd.edid_cea.header.payload_bytes =
11100 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11101 	input->offset = offset;
11102 	input->length = length;
11103 	input->total_length = total_length;
11104 	memcpy(input->payload, data, length);
11105 
11106 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11107 	if (!res) {
11108 		DRM_ERROR("EDID CEA parser failed\n");
11109 		return false;
11110 	}
11111 
11112 	output = &cmd.edid_cea.data.output;
11113 
11114 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11115 		if (!output->ack.success) {
11116 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11117 					output->ack.offset);
11118 		}
11119 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11120 		if (!output->amd_vsdb.vsdb_found)
11121 			return false;
11122 
11123 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11124 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11125 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11126 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11127 	} else {
11128 		DRM_WARN("Unknown EDID CEA parser results\n");
11129 		return false;
11130 	}
11131 
11132 	return true;
11133 }
11134 
11135 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11136 		uint8_t *edid_ext, int len,
11137 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11138 {
11139 	int i;
11140 
11141 	/* send extension block to DMCU for parsing */
11142 	for (i = 0; i < len; i += 8) {
11143 		bool res;
11144 		int offset;
11145 
11146 		/* send 8 bytes at a time */
11147 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11148 			return false;
11149 
11150 		if (i+8 == len) {
11151 			/* whole EDID block sent, expect the parse result */
11152 			int version, min_rate, max_rate;
11153 
11154 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11155 			if (res) {
11156 				/* amd vsdb found */
11157 				vsdb_info->freesync_supported = 1;
11158 				vsdb_info->amd_vsdb_version = version;
11159 				vsdb_info->min_refresh_rate_hz = min_rate;
11160 				vsdb_info->max_refresh_rate_hz = max_rate;
11161 				return true;
11162 			}
11163 			/* not amd vsdb */
11164 			return false;
11165 		}
11166 
11167 		/* check for ack */
11168 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11169 		if (!res)
11170 			return false;
11171 	}
11172 
11173 	return false;
11174 }
11175 
11176 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11177 		uint8_t *edid_ext, int len,
11178 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11179 {
11180 	int i;
11181 
11182 	/* send extension block to DMUB for parsing */
11183 	for (i = 0; i < len; i += 8) {
11184 		/* send 8 bytes at a time */
11185 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11186 			return false;
11187 	}
11188 
11189 	return vsdb_info->freesync_supported;
11190 }
11191 
11192 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11193 		uint8_t *edid_ext, int len,
11194 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11195 {
11196 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11197 
11198 	if (adev->dm.dmub_srv)
11199 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11200 	else
11201 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11202 }
11203 
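/*
 * Look for a CEA extension block in the EDID and have the display firmware
 * (DMUB when available, otherwise DMCU) parse it for the AMD vendor-specific
 * data block. Returns the extension index when a valid VSDB is found,
 * -ENODEV otherwise.
 */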
11204 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11205 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11206 {
11207 	uint8_t *edid_ext = NULL;
11208 	int i;
11209 	bool valid_vsdb_found = false;
11210 
11211 	/*----- drm_find_cea_extension() -----*/
11212 	/* No EDID or EDID extensions */
11213 	if (edid == NULL || edid->extensions == 0)
11214 		return -ENODEV;
11215 
11216 	/* Find CEA extension */
11217 	for (i = 0; i < edid->extensions; i++) {
11218 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11219 		if (edid_ext[0] == CEA_EXT)
11220 			break;
11221 	}
11222 
11223 	if (i == edid->extensions)
11224 		return -ENODEV;
11225 
11226 	/*----- cea_db_offsets() -----*/
11227 	if (edid_ext[0] != CEA_EXT)
11228 		return -ENODEV;
11229 
11230 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11231 
11232 	return valid_vsdb_found ? i : -ENODEV;
11233 }
11234 
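/*
 * Refresh the connector's FreeSync state from a newly parsed EDID. DP/eDP
 * sinks that can ignore MSA timing get their range from the EDID monitor
 * range descriptor; HDMI sinks get it from the AMD VSDB in the CEA extension.
 * A range is only considered usable when max_vfreq - min_vfreq > 10, and the
 * result is mirrored into the DRM "vrr_capable" connector property.
 */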
11235 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11236 					struct edid *edid)
11237 {
11238 	int i = 0;
11239 	struct detailed_timing *timing;
11240 	struct detailed_non_pixel *data;
11241 	struct detailed_data_monitor_range *range;
11242 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11243 			to_amdgpu_dm_connector(connector);
11244 	struct dm_connector_state *dm_con_state = NULL;
11245 	struct dc_sink *sink;
11246 
11247 	struct drm_device *dev = connector->dev;
11248 	struct amdgpu_device *adev = drm_to_adev(dev);
11249 	bool freesync_capable = false;
11250 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11251 
11252 	if (!connector->state) {
11253 		DRM_ERROR("%s - Connector has no state\n", __func__);
11254 		goto update;
11255 	}
11256 
11257 	sink = amdgpu_dm_connector->dc_sink ?
11258 		amdgpu_dm_connector->dc_sink :
11259 		amdgpu_dm_connector->dc_em_sink;
11260 
11261 	if (!edid || !sink) {
11262 		dm_con_state = to_dm_connector_state(connector->state);
11263 
11264 		amdgpu_dm_connector->min_vfreq = 0;
11265 		amdgpu_dm_connector->max_vfreq = 0;
11266 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11267 		connector->display_info.monitor_range.min_vfreq = 0;
11268 		connector->display_info.monitor_range.max_vfreq = 0;
11269 		freesync_capable = false;
11270 
11271 		goto update;
11272 	}
11273 
11274 	dm_con_state = to_dm_connector_state(connector->state);
11275 
11276 	if (!adev->dm.freesync_module)
11277 		goto update;
11278 
11279 
11280 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11281 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11282 		bool edid_check_required = false;
11283 
11284 		if (edid) {
11285 			edid_check_required = is_dp_capable_without_timing_msa(
11286 						adev->dm.dc,
11287 						amdgpu_dm_connector);
11288 		}
11289 
11290 		if (edid_check_required && (edid->version > 1 ||
11291 		   (edid->version == 1 && edid->revision > 1))) {
11292 			for (i = 0; i < 4; i++) {
11293 
11294 				timing	= &edid->detailed_timings[i];
11295 				data	= &timing->data.other_data;
11296 				range	= &data->data.range;
11297 				/*
11298 				 * Check if monitor has continuous frequency mode
11299 				 */
11300 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11301 					continue;
11302 				/*
11303 				 * Check for flag range limits only. If flag == 1 then
11304 				 * no additional timing information provided.
11305 				 * Default GTF, GTF Secondary curve and CVT are not
11306 				 * supported
11307 				 */
11308 				if (range->flags != 1)
11309 					continue;
11310 
11311 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11312 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11313 				amdgpu_dm_connector->pixel_clock_mhz =
11314 					range->pixel_clock_mhz * 10;
11315 
11316 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11317 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11318 
11319 				break;
11320 			}
11321 
11322 			if (amdgpu_dm_connector->max_vfreq -
11323 			    amdgpu_dm_connector->min_vfreq > 10) {
11324 
11325 				freesync_capable = true;
11326 			}
11327 		}
11328 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11329 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11330 		if (i >= 0 && vsdb_info.freesync_supported) {
11331 			timing  = &edid->detailed_timings[i];
11332 			data    = &timing->data.other_data;
11333 
11334 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11335 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11336 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11337 				freesync_capable = true;
11338 
11339 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11340 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11341 		}
11342 	}
11343 
11344 update:
11345 	if (dm_con_state)
11346 		dm_con_state->freesync_capable = freesync_capable;
11347 
11348 	if (connector->vrr_capable_property)
11349 		drm_connector_set_vrr_capable_property(connector,
11350 						       freesync_capable);
11351 }
11352 
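/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and re-run per-frame CRTC master synchronization under the dc_lock.
 */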
11353 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11354 {
11355 	struct amdgpu_device *adev = drm_to_adev(dev);
11356 	struct dc *dc = adev->dm.dc;
11357 	int i;
11358 
11359 	mutex_lock(&adev->dm.dc_lock);
11360 	if (dc->current_state) {
11361 		for (i = 0; i < dc->current_state->stream_count; ++i)
11362 			dc->current_state->streams[i]
11363 				->triggered_crtc_reset.enabled =
11364 				adev->dm.force_timing_sync;
11365 
11366 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11367 		dc_trigger_sync(dc, dc->current_state);
11368 	}
11369 	mutex_unlock(&adev->dm.dc_lock);
11370 }
11371 
11372 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11373 		       uint32_t value, const char *func_name)
11374 {
11375 #ifdef DM_CHECK_ADDR_0
11376 	if (address == 0) {
11377 		DC_ERR("invalid register write. address = 0");
11378 		return;
11379 	}
11380 #endif
11381 	cgs_write_register(ctx->cgs_device, address, value);
11382 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11383 }
11384 
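/*
 * Register read helper used by DC. A read is rejected (ASSERT) while DMUB
 * register-write gathering is in progress and not in burst-write mode,
 * presumably because the gathered writes have not reached the hardware yet.
 */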
11385 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11386 			  const char *func_name)
11387 {
11388 	uint32_t value;
11389 #ifdef DM_CHECK_ADDR_0
11390 	if (address == 0) {
11391 		DC_ERR("invalid register read; address = 0\n");
11392 		return 0;
11393 	}
11394 #endif
11395 
11396 	if (ctx->dmub_srv &&
11397 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11398 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11399 		ASSERT(false);
11400 		return 0;
11401 	}
11402 
11403 	value = cgs_read_register(ctx->cgs_device, address);
11404 
11405 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11406 
11407 	return value;
11408 }
11409 
11410 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11411 	uint8_t status_type, uint32_t *operation_result)
11412 {
11413 	struct amdgpu_device *adev = ctx->driver_context;
11414 	int return_status = -1;
11415 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11416 
11417 	if (is_cmd_aux) {
11418 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11419 			return_status = p_notify->aux_reply.length;
11420 			*operation_result = p_notify->result;
11421 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11422 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11423 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11424 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11425 		} else {
11426 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11427 		}
11428 	} else {
11429 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11430 			return_status = 0;
11431 			*operation_result = p_notify->sc_status;
11432 		} else {
11433 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11434 		}
11435 	}
11436 
11437 	return return_status;
11438 }
11439 
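/*
 * Issue a DMUB AUX transfer or SET_CONFIG request and wait up to 10 seconds
 * for dmub_aux_transfer_done to be signalled by the DMUB notification path,
 * then translate the result into a return value and *operation_result via
 * amdgpu_dm_set_dmub_async_sync_status().
 */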
11440 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11441 	unsigned int link_index, void *cmd_payload, void *operation_result)
11442 {
11443 	struct amdgpu_device *adev = ctx->driver_context;
11444 	int ret = 0;
11445 
11446 	if (is_cmd_aux) {
11447 		dc_process_dmub_aux_transfer_async(ctx->dc,
11448 			link_index, (struct aux_payload *)cmd_payload);
11449 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11450 					(struct set_config_cmd_payload *)cmd_payload,
11451 					adev->dm.dmub_notify)) {
11452 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11453 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11454 					(uint32_t *)operation_result);
11455 	}
11456 
11457 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11458 	if (ret == 0) {
11459 		DRM_ERROR("wait_for_completion_timeout timed out\n");
11460 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11461 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11462 				(uint32_t *)operation_result);
11463 	}
11464 
11465 	if (is_cmd_aux) {
11466 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11467 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11468 
11469 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11470 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11471 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11472 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11473 				       adev->dm.dmub_notify->aux_reply.length);
11474 			}
11475 		}
11476 	}
11477 
11478 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11479 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11480 			(uint32_t *)operation_result);
11481 }
11482