xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 7c021558929f7c80ed07e83a91d0905c31e8cd9a)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_blend.h>
82 #include <drm/drm_fb_helper.h>
83 #include <drm/drm_fourcc.h>
84 #include <drm/drm_edid.h>
85 #include <drm/drm_vblank.h>
86 #include <drm/drm_audio_component.h>
87 #include <drm/drm_gem_atomic_helper.h>
88 
89 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
90 
91 #include "dcn/dcn_1_0_offset.h"
92 #include "dcn/dcn_1_0_sh_mask.h"
93 #include "soc15_hw_ip.h"
94 #include "vega10_ip_offset.h"
95 
96 #include "soc15_common.h"
97 
98 #include "modules/inc/mod_freesync.h"
99 #include "modules/power/power_helpers.h"
100 #include "modules/inc/mod_info_packet.h"
101 
102 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
104 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
106 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
108 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
110 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
112 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
114 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
116 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
118 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
120 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
121 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
122 
123 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
124 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
125 
126 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
127 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
128 
129 /* Number of bytes in PSP header for firmware. */
130 #define PSP_HEADER_BYTES 0x100
131 
132 /* Number of bytes in PSP footer for firmware. */
133 #define PSP_FOOTER_BYTES 0x100
134 
135 /**
136  * DOC: overview
137  *
138  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
139  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
140  * requests into DC requests, and DC responses into DRM responses.
141  *
142  * The root control structure is &struct amdgpu_display_manager.
143  */
144 
145 /* basic init/fini API */
146 static int amdgpu_dm_init(struct amdgpu_device *adev);
147 static void amdgpu_dm_fini(struct amdgpu_device *adev);
148 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
149 
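/* Map the DC dongle type reported in DPCD caps to a DRM subconnector type. */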
150 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
151 {
152 	switch (link->dpcd_caps.dongle_type) {
153 	case DISPLAY_DONGLE_NONE:
154 		return DRM_MODE_SUBCONNECTOR_Native;
155 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
156 		return DRM_MODE_SUBCONNECTOR_VGA;
157 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
158 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
159 		return DRM_MODE_SUBCONNECTOR_DVID;
160 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
161 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
162 		return DRM_MODE_SUBCONNECTOR_HDMIA;
163 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
164 	default:
165 		return DRM_MODE_SUBCONNECTOR_Unknown;
166 	}
167 }
168 
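/*
 * Refresh the DP subconnector property for a DisplayPort connector, based on
 * the dongle type of the attached sink (Unknown when no sink is present).
 */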
169 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
170 {
171 	struct dc_link *link = aconnector->dc_link;
172 	struct drm_connector *connector = &aconnector->base;
173 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
174 
175 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
176 		return;
177 
178 	if (aconnector->dc_sink)
179 		subconnector = get_subconnector_type(link);
180 
181 	drm_object_property_set_value(&connector->base,
182 			connector->dev->mode_config.dp_subconnector_property,
183 			subconnector);
184 }
185 
186 /*
187  * initializes drm_device display related structures, based on the information
188  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
189  * drm_encoder, drm_mode_config
190  *
191  * Returns 0 on success
192  */
193 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
194 /* removes and deallocates the drm structures, created by the above function */
195 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
196 
197 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
198 				struct drm_plane *plane,
199 				unsigned long possible_crtcs,
200 				const struct dc_plane_cap *plane_cap);
201 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
202 			       struct drm_plane *plane,
203 			       uint32_t link_index);
204 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
205 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
206 				    uint32_t link_index,
207 				    struct amdgpu_encoder *amdgpu_encoder);
208 static int amdgpu_dm_encoder_init(struct drm_device *dev,
209 				  struct amdgpu_encoder *aencoder,
210 				  uint32_t link_index);
211 
212 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
213 
214 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
215 
216 static int amdgpu_dm_atomic_check(struct drm_device *dev,
217 				  struct drm_atomic_state *state);
218 
219 static void handle_cursor_update(struct drm_plane *plane,
220 				 struct drm_plane_state *old_plane_state);
221 
222 static const struct drm_format_info *
223 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
224 
225 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
226 static void handle_hpd_rx_irq(void *param);
227 
228 static bool
229 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
230 				 struct drm_crtc_state *new_crtc_state);
231 /*
232  * dm_vblank_get_counter
233  *
234  * @brief
235  * Get counter for number of vertical blanks
236  *
237  * @param
238  * struct amdgpu_device *adev - [in] desired amdgpu device
239  * int crtc - [in] which CRTC to get the counter from
240  *
241  * @return
242  * Counter for vertical blanks
243  */
244 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
245 {
246 	if (crtc >= adev->mode_info.num_crtc)
247 		return 0;
248 	else {
249 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
250 
251 		if (acrtc->dm_irq_params.stream == NULL) {
252 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
253 				  crtc);
254 			return 0;
255 		}
256 
257 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
258 	}
259 }
260 
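/*
 * Return the current scanout position and vblank window of a CRTC, packed in
 * the register-style format expected by the base driver.
 */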
261 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
262 				  u32 *vbl, u32 *position)
263 {
264 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
265 
266 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
267 		return -EINVAL;
268 	else {
269 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
270 
271 		if (acrtc->dm_irq_params.stream == NULL) {
272 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
273 				  crtc);
274 			return 0;
275 		}
276 
277 		/*
278 		 * TODO rework base driver to use values directly.
279 		 * for now parse it back into reg-format
280 		 */
281 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
282 					 &v_blank_start,
283 					 &v_blank_end,
284 					 &h_position,
285 					 &v_position);
286 
287 		*position = v_position | (h_position << 16);
288 		*vbl = v_blank_start | (v_blank_end << 16);
289 	}
290 
291 	return 0;
292 }
293 
294 static bool dm_is_idle(void *handle)
295 {
296 	/* XXX todo */
297 	return true;
298 }
299 
300 static int dm_wait_for_idle(void *handle)
301 {
302 	/* XXX todo */
303 	return 0;
304 }
305 
306 static bool dm_check_soft_reset(void *handle)
307 {
308 	return false;
309 }
310 
311 static int dm_soft_reset(void *handle)
312 {
313 	/* XXX todo */
314 	return 0;
315 }
316 
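/* Find the amdgpu_crtc driven by the given OTG instance, or NULL if none. */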
317 static struct amdgpu_crtc *
318 get_crtc_by_otg_inst(struct amdgpu_device *adev,
319 		     int otg_inst)
320 {
321 	struct drm_device *dev = adev_to_drm(adev);
322 	struct drm_crtc *crtc;
323 	struct amdgpu_crtc *amdgpu_crtc;
324 
325 	if (WARN_ON(otg_inst == -1))
326 		return adev->mode_info.crtcs[0];
327 
328 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
329 		amdgpu_crtc = to_amdgpu_crtc(crtc);
330 
331 		if (amdgpu_crtc->otg_inst == otg_inst)
332 			return amdgpu_crtc;
333 	}
334 
335 	return NULL;
336 }
337 
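/* True when VRR (variable or fixed rate) is active in the CRTC's IRQ params. */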
338 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
339 {
340 	return acrtc->dm_irq_params.freesync_config.state ==
341 		       VRR_STATE_ACTIVE_VARIABLE ||
342 	       acrtc->dm_irq_params.freesync_config.state ==
343 		       VRR_STATE_ACTIVE_FIXED;
344 }
345 
346 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
347 {
348 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
349 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
350 }
351 
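/*
 * DC needs a vmin/vmax timing adjustment when fixed-rate VRR is requested or
 * when the VRR active state differs between the old and new CRTC state.
 */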
352 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
353 					      struct dm_crtc_state *new_state)
354 {
355 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
356 		return true;
357 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
358 		return true;
359 	else
360 		return false;
361 }
362 
363 /**
364  * dm_pflip_high_irq() - Handle pageflip interrupt
365  * @interrupt_params: ignored
366  *
367  * Handles the pageflip interrupt by notifying all interested parties
368  * that the pageflip has been completed.
369  */
370 static void dm_pflip_high_irq(void *interrupt_params)
371 {
372 	struct amdgpu_crtc *amdgpu_crtc;
373 	struct common_irq_params *irq_params = interrupt_params;
374 	struct amdgpu_device *adev = irq_params->adev;
375 	unsigned long flags;
376 	struct drm_pending_vblank_event *e;
377 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
378 	bool vrr_active;
379 
380 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
381 
382 	/* IRQ could occur when in initial stage */
383 	/* TODO work and BO cleanup */
384 	if (amdgpu_crtc == NULL) {
385 		DC_LOG_PFLIP("CRTC is null, returning.\n");
386 		return;
387 	}
388 
389 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
390 
391 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
392 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
393 						 amdgpu_crtc->pflip_status,
394 						 AMDGPU_FLIP_SUBMITTED,
395 						 amdgpu_crtc->crtc_id,
396 						 amdgpu_crtc);
397 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
398 		return;
399 	}
400 
401 	/* page flip completed. */
402 	e = amdgpu_crtc->event;
403 	amdgpu_crtc->event = NULL;
404 
405 	WARN_ON(!e);
406 
407 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
408 
409 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
410 	if (!vrr_active ||
411 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
412 				      &v_blank_end, &hpos, &vpos) ||
413 	    (vpos < v_blank_start)) {
414 		/* Update to correct count and vblank timestamp if racing with
415 		 * vblank irq. This also updates to the correct vblank timestamp
416 		 * even in VRR mode, as scanout is past the front-porch atm.
417 		 */
418 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
419 
420 		/* Wake up userspace by sending the pageflip event with proper
421 		 * count and timestamp of vblank of flip completion.
422 		 */
423 		if (e) {
424 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
425 
426 			/* Event sent, so done with vblank for this flip */
427 			drm_crtc_vblank_put(&amdgpu_crtc->base);
428 		}
429 	} else if (e) {
430 		/* VRR active and inside front-porch: vblank count and
431 		 * timestamp for pageflip event will only be up to date after
432 		 * drm_crtc_handle_vblank() has been executed from late vblank
433 		 * irq handler after start of back-porch (vline 0). We queue the
434 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
435 		 * updated timestamp and count, once it runs after us.
436 		 *
437 		 * We need to open-code this instead of using the helper
438 		 * drm_crtc_arm_vblank_event(), as that helper would
439 		 * call drm_crtc_accurate_vblank_count(), which we must
440 		 * not call in VRR mode while we are in front-porch!
441 		 */
442 
443 		/* sequence will be replaced by real count during send-out. */
444 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
445 		e->pipe = amdgpu_crtc->crtc_id;
446 
447 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
448 		e = NULL;
449 	}
450 
451 	/* Keep track of vblank of this flip for flip throttling. We use the
452 	 * cooked hw counter, as that one is incremented at the start of this vblank
453 	 * of pageflip completion, so last_flip_vblank is the forbidden count
454 	 * for queueing new pageflips if vsync + VRR is enabled.
455 	 */
456 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
457 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
458 
459 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
460 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
461 
462 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
463 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
464 		     vrr_active, (int) !e);
465 }
466 
467 static void dm_vupdate_high_irq(void *interrupt_params)
468 {
469 	struct common_irq_params *irq_params = interrupt_params;
470 	struct amdgpu_device *adev = irq_params->adev;
471 	struct amdgpu_crtc *acrtc;
472 	struct drm_device *drm_dev;
473 	struct drm_vblank_crtc *vblank;
474 	ktime_t frame_duration_ns, previous_timestamp;
475 	unsigned long flags;
476 	int vrr_active;
477 
478 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
479 
480 	if (acrtc) {
481 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
482 		drm_dev = acrtc->base.dev;
483 		vblank = &drm_dev->vblank[acrtc->base.index];
484 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
485 		frame_duration_ns = vblank->time - previous_timestamp;
486 
487 		if (frame_duration_ns > 0) {
488 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
489 						frame_duration_ns,
490 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
491 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
492 		}
493 
494 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
495 			      acrtc->crtc_id,
496 			      vrr_active);
497 
498 		/* Core vblank handling is done here after end of front-porch in
499 		 * vrr mode, as vblank timestamping only gives valid results now
500 		 * that scanout is past the front-porch. This will also deliver
501 		 * page-flip completion events that have been queued to us
502 		 * if a pageflip happened inside front-porch.
503 		 */
504 		if (vrr_active) {
505 			drm_crtc_handle_vblank(&acrtc->base);
506 
507 			/* BTR processing for pre-DCE12 ASICs */
508 			if (acrtc->dm_irq_params.stream &&
509 			    adev->family < AMDGPU_FAMILY_AI) {
510 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
511 				mod_freesync_handle_v_update(
512 				    adev->dm.freesync_module,
513 				    acrtc->dm_irq_params.stream,
514 				    &acrtc->dm_irq_params.vrr_params);
515 
516 				dc_stream_adjust_vmin_vmax(
517 				    adev->dm.dc,
518 				    acrtc->dm_irq_params.stream,
519 				    &acrtc->dm_irq_params.vrr_params.adjust);
520 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
521 			}
522 		}
523 	}
524 }
525 
526 /**
527  * dm_crtc_high_irq() - Handles CRTC interrupt
528  * @interrupt_params: used for determining the CRTC instance
529  *
530  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
531  * event handler.
532  */
533 static void dm_crtc_high_irq(void *interrupt_params)
534 {
535 	struct common_irq_params *irq_params = interrupt_params;
536 	struct amdgpu_device *adev = irq_params->adev;
537 	struct amdgpu_crtc *acrtc;
538 	unsigned long flags;
539 	int vrr_active;
540 
541 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
542 	if (!acrtc)
543 		return;
544 
545 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
546 
547 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
548 		      vrr_active, acrtc->dm_irq_params.active_planes);
549 
550 	/*
551 	 * Core vblank handling at start of front-porch is only possible
552 	 * in non-vrr mode, as only then does vblank timestamping give
553 	 * valid results while still inside the front-porch. Otherwise defer
554 	 * it to dm_vupdate_high_irq after end of front-porch.
555 	 */
556 	if (!vrr_active)
557 		drm_crtc_handle_vblank(&acrtc->base);
558 
559 	/*
560 	 * The following must happen at start of vblank, for crc
561 	 * computation and below-the-range btr support in vrr mode.
562 	 */
563 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
564 
565 	/* BTR updates need to happen before VUPDATE on Vega and above. */
566 	if (adev->family < AMDGPU_FAMILY_AI)
567 		return;
568 
569 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
570 
571 	if (acrtc->dm_irq_params.stream &&
572 	    acrtc->dm_irq_params.vrr_params.supported &&
573 	    acrtc->dm_irq_params.freesync_config.state ==
574 		    VRR_STATE_ACTIVE_VARIABLE) {
575 		mod_freesync_handle_v_update(adev->dm.freesync_module,
576 					     acrtc->dm_irq_params.stream,
577 					     &acrtc->dm_irq_params.vrr_params);
578 
579 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
580 					   &acrtc->dm_irq_params.vrr_params.adjust);
581 	}
582 
583 	/*
584 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
585 	 * In that case, pageflip completion interrupts won't fire and pageflip
586 	 * completion events won't get delivered. Prevent this by sending
587 	 * pending pageflip events from here if a flip is still pending.
588 	 *
589 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
590 	 * avoid race conditions between flip programming and completion,
591 	 * which could cause too early flip completion events.
592 	 */
593 	if (adev->family >= AMDGPU_FAMILY_RV &&
594 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
595 	    acrtc->dm_irq_params.active_planes == 0) {
596 		if (acrtc->event) {
597 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
598 			acrtc->event = NULL;
599 			drm_crtc_vblank_put(&acrtc->base);
600 		}
601 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
602 	}
603 
604 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
605 }
606 
607 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
608 /**
609  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
610  * DCN generation ASICs
611  * @interrupt_params: interrupt parameters
612  *
613  * Used to set crc window/read out crc value at vertical line 0 position
614  */
615 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
616 {
617 	struct common_irq_params *irq_params = interrupt_params;
618 	struct amdgpu_device *adev = irq_params->adev;
619 	struct amdgpu_crtc *acrtc;
620 
621 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
622 
623 	if (!acrtc)
624 		return;
625 
626 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
627 }
628 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
629 
630 /**
631  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
632  * @adev: amdgpu_device pointer
633  * @notify: dmub notification structure
634  *
635  * Dmub AUX or SET_CONFIG command completion processing callback
636  * Copies dmub notification to DM which is to be read by the AUX command
637  * issuing thread and also signals the event to wake up the thread.
638  */
639 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
640 					struct dmub_notification *notify)
641 {
642 	if (adev->dm.dmub_notify)
643 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
644 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
645 		complete(&adev->dm.dmub_aux_transfer_done);
646 }
647 
648 /**
649  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
650  * @adev: amdgpu_device pointer
651  * @notify: dmub notification structure
652  *
653  * Dmub Hpd interrupt processing callback. Gets the display index through the
654  * link index and calls a helper to do the processing.
655  */
656 static void dmub_hpd_callback(struct amdgpu_device *adev,
657 			      struct dmub_notification *notify)
658 {
659 	struct amdgpu_dm_connector *aconnector;
660 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
661 	struct drm_connector *connector;
662 	struct drm_connector_list_iter iter;
663 	struct dc_link *link;
664 	uint8_t link_index = 0;
665 	struct drm_device *dev;
666 
667 	if (adev == NULL)
668 		return;
669 
670 	if (notify == NULL) {
671 		DRM_ERROR("DMUB HPD callback notification was NULL");
672 		return;
673 	}
674 
675 	if (notify->link_index > adev->dm.dc->link_count) {
676 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
677 		return;
678 	}
679 
680 	link_index = notify->link_index;
681 	link = adev->dm.dc->links[link_index];
682 	dev = adev->dm.ddev;
683 
684 	drm_connector_list_iter_begin(dev, &iter);
685 	drm_for_each_connector_iter(connector, &iter) {
686 		aconnector = to_amdgpu_dm_connector(connector);
687 		if (link && aconnector->dc_link == link) {
688 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
689 			hpd_aconnector = aconnector;
690 			break;
691 		}
692 	}
693 	drm_connector_list_iter_end(&iter);
694 
695 	if (hpd_aconnector) {
696 		if (notify->type == DMUB_NOTIFICATION_HPD)
697 			handle_hpd_irq_helper(hpd_aconnector);
698 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
699 			handle_hpd_rx_irq(hpd_aconnector);
700 	}
701 }
702 
703 /**
704  * register_dmub_notify_callback - Sets callback for DMUB notify
705  * @adev: amdgpu_device pointer
706  * @type: Type of dmub notification
707  * @callback: Dmub interrupt callback function
708  * @dmub_int_thread_offload: offload indicator
709  *
710  * API to register a dmub callback handler for a dmub notification
711  * Also sets an indicator for whether callback processing is to be offloaded
712  * to the dmub interrupt handling thread.
713  * Return: true if successfully registered, false if there is existing registration
714  */
715 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
716 					  enum dmub_notification_type type,
717 					  dmub_notify_interrupt_callback_t callback,
718 					  bool dmub_int_thread_offload)
719 {
720 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
721 		adev->dm.dmub_callback[type] = callback;
722 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
723 	} else
724 		return false;
725 
726 	return true;
727 }
728 
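/*
 * Deferred DMUB HPD work: hand the saved notification to the registered
 * callback outside of interrupt context.
 */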
729 static void dm_handle_hpd_work(struct work_struct *work)
730 {
731 	struct dmub_hpd_work *dmub_hpd_wrk;
732 
733 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
734 
735 	if (!dmub_hpd_wrk->dmub_notify) {
736 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
737 		return;
738 	}
739 
740 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
741 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
742 		dmub_hpd_wrk->dmub_notify);
743 	}
744 
745 	kfree(dmub_hpd_wrk->dmub_notify);
746 	kfree(dmub_hpd_wrk);
747 
748 }
749 
750 #define DMUB_TRACE_MAX_READ 64
751 /**
752  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
753  * @interrupt_params: used for determining the Outbox instance
754  *
755  * Handles the Outbox interrupt by dispatching DMUB notifications to their
756  * registered callbacks and draining the DMUB trace buffer.
757  */
758 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
759 {
760 	struct dmub_notification notify;
761 	struct common_irq_params *irq_params = interrupt_params;
762 	struct amdgpu_device *adev = irq_params->adev;
763 	struct amdgpu_display_manager *dm = &adev->dm;
764 	struct dmcub_trace_buf_entry entry = { 0 };
765 	uint32_t count = 0;
766 	struct dmub_hpd_work *dmub_hpd_wrk;
767 	struct dc_link *plink = NULL;
768 
769 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
770 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
771 
772 		do {
773 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
774 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
775 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
776 				continue;
777 			}
778 			if (!dm->dmub_callback[notify.type]) {
779 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
780 				continue;
781 			}
782 			if (dm->dmub_thread_offload[notify.type] == true) {
783 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
784 				if (!dmub_hpd_wrk) {
785 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
786 					return;
787 				}
788 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
789 				if (!dmub_hpd_wrk->dmub_notify) {
790 					kfree(dmub_hpd_wrk);
791 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
792 					return;
793 				}
794 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
795 				if (dmub_hpd_wrk->dmub_notify)
796 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
797 				dmub_hpd_wrk->adev = adev;
798 				if (notify.type == DMUB_NOTIFICATION_HPD) {
799 					plink = adev->dm.dc->links[notify.link_index];
800 					if (plink) {
801 						plink->hpd_status =
802 							notify.hpd_status == DP_HPD_PLUG;
803 					}
804 				}
805 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
806 			} else {
807 				dm->dmub_callback[notify.type](adev, &notify);
808 			}
809 		} while (notify.pending_notification);
810 	}
811 
812 
813 	do {
814 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
815 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
816 							entry.param0, entry.param1);
817 
818 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
819 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
820 		} else
821 			break;
822 
823 		count++;
824 
825 	} while (count <= DMUB_TRACE_MAX_READ);
826 
827 	if (count > DMUB_TRACE_MAX_READ)
828 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
829 }
830 
831 static int dm_set_clockgating_state(void *handle,
832 		  enum amd_clockgating_state state)
833 {
834 	return 0;
835 }
836 
837 static int dm_set_powergating_state(void *handle,
838 		  enum amd_powergating_state state)
839 {
840 	return 0;
841 }
842 
843 /* Prototypes of private functions */
844 static int dm_early_init(void *handle);
845 
846 /* Allocate memory for FBC compressed data  */
847 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
848 {
849 	struct drm_device *dev = connector->dev;
850 	struct amdgpu_device *adev = drm_to_adev(dev);
851 	struct dm_compressor_info *compressor = &adev->dm.compressor;
852 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
853 	struct drm_display_mode *mode;
854 	unsigned long max_size = 0;
855 
856 	if (adev->dm.dc->fbc_compressor == NULL)
857 		return;
858 
859 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
860 		return;
861 
862 	if (compressor->bo_ptr)
863 		return;
864 
865 
866 	list_for_each_entry(mode, &connector->modes, head) {
867 		if (max_size < mode->htotal * mode->vtotal)
868 			max_size = mode->htotal * mode->vtotal;
869 	}
870 
871 	if (max_size) {
872 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
873 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
874 			    &compressor->gpu_addr, &compressor->cpu_addr);
875 
876 		if (r)
877 			DRM_ERROR("DM: Failed to initialize FBC\n");
878 		else {
879 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
880 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
881 		}
882 
883 	}
884 
885 }
886 
887 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
888 					  int pipe, bool *enabled,
889 					  unsigned char *buf, int max_bytes)
890 {
891 	struct drm_device *dev = dev_get_drvdata(kdev);
892 	struct amdgpu_device *adev = drm_to_adev(dev);
893 	struct drm_connector *connector;
894 	struct drm_connector_list_iter conn_iter;
895 	struct amdgpu_dm_connector *aconnector;
896 	int ret = 0;
897 
898 	*enabled = false;
899 
900 	mutex_lock(&adev->dm.audio_lock);
901 
902 	drm_connector_list_iter_begin(dev, &conn_iter);
903 	drm_for_each_connector_iter(connector, &conn_iter) {
904 		aconnector = to_amdgpu_dm_connector(connector);
905 		if (aconnector->audio_inst != port)
906 			continue;
907 
908 		*enabled = true;
909 		ret = drm_eld_size(connector->eld);
910 		memcpy(buf, connector->eld, min(max_bytes, ret));
911 
912 		break;
913 	}
914 	drm_connector_list_iter_end(&conn_iter);
915 
916 	mutex_unlock(&adev->dm.audio_lock);
917 
918 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
919 
920 	return ret;
921 }
922 
923 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
924 	.get_eld = amdgpu_dm_audio_component_get_eld,
925 };
926 
927 static int amdgpu_dm_audio_component_bind(struct device *kdev,
928 				       struct device *hda_kdev, void *data)
929 {
930 	struct drm_device *dev = dev_get_drvdata(kdev);
931 	struct amdgpu_device *adev = drm_to_adev(dev);
932 	struct drm_audio_component *acomp = data;
933 
934 	acomp->ops = &amdgpu_dm_audio_component_ops;
935 	acomp->dev = kdev;
936 	adev->dm.audio_component = acomp;
937 
938 	return 0;
939 }
940 
941 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
942 					  struct device *hda_kdev, void *data)
943 {
944 	struct drm_device *dev = dev_get_drvdata(kdev);
945 	struct amdgpu_device *adev = drm_to_adev(dev);
946 	struct drm_audio_component *acomp = data;
947 
948 	acomp->ops = NULL;
949 	acomp->dev = NULL;
950 	adev->dm.audio_component = NULL;
951 }
952 
953 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
954 	.bind	= amdgpu_dm_audio_component_bind,
955 	.unbind	= amdgpu_dm_audio_component_unbind,
956 };
957 
958 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
959 {
960 	int i, ret;
961 
962 	if (!amdgpu_audio)
963 		return 0;
964 
965 	adev->mode_info.audio.enabled = true;
966 
967 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
968 
969 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
970 		adev->mode_info.audio.pin[i].channels = -1;
971 		adev->mode_info.audio.pin[i].rate = -1;
972 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
973 		adev->mode_info.audio.pin[i].status_bits = 0;
974 		adev->mode_info.audio.pin[i].category_code = 0;
975 		adev->mode_info.audio.pin[i].connected = false;
976 		adev->mode_info.audio.pin[i].id =
977 			adev->dm.dc->res_pool->audios[i]->inst;
978 		adev->mode_info.audio.pin[i].offset = 0;
979 	}
980 
981 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
982 	if (ret < 0)
983 		return ret;
984 
985 	adev->dm.audio_registered = true;
986 
987 	return 0;
988 }
989 
990 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
991 {
992 	if (!amdgpu_audio)
993 		return;
994 
995 	if (!adev->mode_info.audio.enabled)
996 		return;
997 
998 	if (adev->dm.audio_registered) {
999 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1000 		adev->dm.audio_registered = false;
1001 	}
1002 
1003 	/* TODO: Disable audio? */
1004 
1005 	adev->mode_info.audio.enabled = false;
1006 }
1007 
1008 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1009 {
1010 	struct drm_audio_component *acomp = adev->dm.audio_component;
1011 
1012 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1013 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1014 
1015 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1016 						 pin, -1);
1017 	}
1018 }
1019 
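/*
 * Copy the DMUB firmware regions and VBIOS into framebuffer memory, program
 * the hardware parameters and bring up the DMUB service (and DMCU/ABM if
 * present).
 */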
1020 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1021 {
1022 	const struct dmcub_firmware_header_v1_0 *hdr;
1023 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1024 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1025 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1026 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1027 	struct abm *abm = adev->dm.dc->res_pool->abm;
1028 	struct dmub_srv_hw_params hw_params;
1029 	enum dmub_status status;
1030 	const unsigned char *fw_inst_const, *fw_bss_data;
1031 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1032 	bool has_hw_support;
1033 
1034 	if (!dmub_srv)
1035 		/* DMUB isn't supported on the ASIC. */
1036 		return 0;
1037 
1038 	if (!fb_info) {
1039 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1040 		return -EINVAL;
1041 	}
1042 
1043 	if (!dmub_fw) {
1044 		/* Firmware required for DMUB support. */
1045 		DRM_ERROR("No firmware provided for DMUB.\n");
1046 		return -EINVAL;
1047 	}
1048 
1049 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1050 	if (status != DMUB_STATUS_OK) {
1051 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1052 		return -EINVAL;
1053 	}
1054 
1055 	if (!has_hw_support) {
1056 		DRM_INFO("DMUB unsupported on ASIC\n");
1057 		return 0;
1058 	}
1059 
1060 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1061 	status = dmub_srv_hw_reset(dmub_srv);
1062 	if (status != DMUB_STATUS_OK)
1063 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1064 
1065 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1066 
1067 	fw_inst_const = dmub_fw->data +
1068 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1069 			PSP_HEADER_BYTES;
1070 
1071 	fw_bss_data = dmub_fw->data +
1072 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1073 		      le32_to_cpu(hdr->inst_const_bytes);
1074 
1075 	/* Copy firmware and bios info into FB memory. */
1076 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1077 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1078 
1079 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1080 
1081 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1082 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1083 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1084 	 * will be done by dm_dmub_hw_init
1085 	 */
1086 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1087 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1088 				fw_inst_const_size);
1089 	}
1090 
1091 	if (fw_bss_data_size)
1092 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1093 		       fw_bss_data, fw_bss_data_size);
1094 
1095 	/* Copy firmware bios info into FB memory. */
1096 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1097 	       adev->bios_size);
1098 
1099 	/* Reset regions that need to be reset. */
1100 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1101 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1102 
1103 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1104 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1105 
1106 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1107 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1108 
1109 	/* Initialize hardware. */
1110 	memset(&hw_params, 0, sizeof(hw_params));
1111 	hw_params.fb_base = adev->gmc.fb_start;
1112 	hw_params.fb_offset = adev->gmc.aper_base;
1113 
1114 	/* backdoor load firmware and trigger dmub running */
1115 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1116 		hw_params.load_inst_const = true;
1117 
1118 	if (dmcu)
1119 		hw_params.psp_version = dmcu->psp_version;
1120 
1121 	for (i = 0; i < fb_info->num_fb; ++i)
1122 		hw_params.fb[i] = &fb_info->fb[i];
1123 
1124 	switch (adev->ip_versions[DCE_HWIP][0]) {
1125 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1126 		hw_params.dpia_supported = true;
1127 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1128 		break;
1129 	default:
1130 		break;
1131 	}
1132 
1133 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1134 	if (status != DMUB_STATUS_OK) {
1135 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1136 		return -EINVAL;
1137 	}
1138 
1139 	/* Wait for firmware load to finish. */
1140 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1141 	if (status != DMUB_STATUS_OK)
1142 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1143 
1144 	/* Init DMCU and ABM if available. */
1145 	if (dmcu && abm) {
1146 		dmcu->funcs->dmcu_init(dmcu);
1147 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1148 	}
1149 
1150 	if (!adev->dm.dc->ctx->dmub_srv)
1151 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1152 	if (!adev->dm.dc->ctx->dmub_srv) {
1153 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1154 		return -ENOMEM;
1155 	}
1156 
1157 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1158 		 adev->dm.dmcub_fw_version);
1159 
1160 	return 0;
1161 }
1162 
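/*
 * On resume, only wait for the DMUB firmware auto-load if the hardware is
 * already initialized; otherwise perform the full hardware init.
 */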
1163 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1164 {
1165 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1166 	enum dmub_status status;
1167 	bool init;
1168 
1169 	if (!dmub_srv) {
1170 		/* DMUB isn't supported on the ASIC. */
1171 		return;
1172 	}
1173 
1174 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1175 	if (status != DMUB_STATUS_OK)
1176 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1177 
1178 	if (status == DMUB_STATUS_OK && init) {
1179 		/* Wait for firmware load to finish. */
1180 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1181 		if (status != DMUB_STATUS_OK)
1182 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1183 	} else {
1184 		/* Perform the full hardware initialization. */
1185 		dm_dmub_hw_init(adev);
1186 	}
1187 }
1188 
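/*
 * Build the DC physical address space configuration (system aperture, AGP
 * window and GART page table location) from the GMC settings.
 */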
1189 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1190 {
1191 	uint64_t pt_base;
1192 	uint32_t logical_addr_low;
1193 	uint32_t logical_addr_high;
1194 	uint32_t agp_base, agp_bot, agp_top;
1195 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1196 
1197 	memset(pa_config, 0, sizeof(*pa_config));
1198 
1199 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1200 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1201 
1202 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1203 		/*
1204 		 * Raven2 has a HW issue that prevents it from using the vram that
1205 		 * lies above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
1206 		 * increase the system aperture high address (add 1) to get rid of
1207 		 * the VM fault and hardware hang.
1208 		 */
1209 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1210 	else
1211 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1212 
1213 	agp_base = 0;
1214 	agp_bot = adev->gmc.agp_start >> 24;
1215 	agp_top = adev->gmc.agp_end >> 24;
1216 
1217 
1218 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1219 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1220 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1221 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1222 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1223 	page_table_base.low_part = lower_32_bits(pt_base);
1224 
1225 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1226 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1227 
1228 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1229 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1230 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1231 
1232 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1233 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1234 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1235 
1236 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1237 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1238 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1239 
1240 	pa_config->is_hvm_enabled = 0;
1241 
1242 }
1243 
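/*
 * Deferred vblank work: track how many CRTCs have vblank interrupts enabled,
 * toggle idle optimizations accordingly and enable/disable PSR as required.
 */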
1244 static void vblank_control_worker(struct work_struct *work)
1245 {
1246 	struct vblank_control_work *vblank_work =
1247 		container_of(work, struct vblank_control_work, work);
1248 	struct amdgpu_display_manager *dm = vblank_work->dm;
1249 
1250 	mutex_lock(&dm->dc_lock);
1251 
1252 	if (vblank_work->enable)
1253 		dm->active_vblank_irq_count++;
1254 	else if (dm->active_vblank_irq_count)
1255 		dm->active_vblank_irq_count--;
1256 
1257 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1258 
1259 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1260 
1261 	/* Control PSR based on vblank requirements from OS */
1262 	if (vblank_work->stream && vblank_work->stream->link) {
1263 		if (vblank_work->enable) {
1264 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1265 				amdgpu_dm_psr_disable(vblank_work->stream);
1266 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1267 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1268 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1269 			amdgpu_dm_psr_enable(vblank_work->stream);
1270 		}
1271 	}
1272 
1273 	mutex_unlock(&dm->dc_lock);
1274 
1275 	dc_stream_release(vblank_work->stream);
1276 
1277 	kfree(vblank_work);
1278 }
1279 
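/*
 * Offloaded HPD RX work: re-detect the sink and, when not in GPU reset,
 * handle automated test requests or DP link-loss recovery for the link.
 */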
1280 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1281 {
1282 	struct hpd_rx_irq_offload_work *offload_work;
1283 	struct amdgpu_dm_connector *aconnector;
1284 	struct dc_link *dc_link;
1285 	struct amdgpu_device *adev;
1286 	enum dc_connection_type new_connection_type = dc_connection_none;
1287 	unsigned long flags;
1288 
1289 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1290 	aconnector = offload_work->offload_wq->aconnector;
1291 
1292 	if (!aconnector) {
1293 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1294 		goto skip;
1295 	}
1296 
1297 	adev = drm_to_adev(aconnector->base.dev);
1298 	dc_link = aconnector->dc_link;
1299 
1300 	mutex_lock(&aconnector->hpd_lock);
1301 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1302 		DRM_ERROR("KMS: Failed to detect connector\n");
1303 	mutex_unlock(&aconnector->hpd_lock);
1304 
1305 	if (new_connection_type == dc_connection_none)
1306 		goto skip;
1307 
1308 	if (amdgpu_in_reset(adev))
1309 		goto skip;
1310 
1311 	mutex_lock(&adev->dm.dc_lock);
1312 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1313 		dc_link_dp_handle_automated_test(dc_link);
1314 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1315 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1316 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1317 		dc_link_dp_handle_link_loss(dc_link);
1318 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1319 		offload_work->offload_wq->is_handling_link_loss = false;
1320 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1321 	}
1322 	mutex_unlock(&adev->dm.dc_lock);
1323 
1324 skip:
1325 	kfree(offload_work);
1326 
1327 }
1328 
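/*
 * Allocate one single-threaded offload workqueue per link so that lengthy
 * HPD RX interrupt handling can be deferred out of interrupt context.
 */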
1329 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1330 {
1331 	int max_caps = dc->caps.max_links;
1332 	int i = 0;
1333 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1334 
1335 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1336 
1337 	if (!hpd_rx_offload_wq)
1338 		return NULL;
1339 
1340 
1341 	for (i = 0; i < max_caps; i++) {
1342 		hpd_rx_offload_wq[i].wq =
1343 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1344 
1345 		if (hpd_rx_offload_wq[i].wq == NULL) {
1346 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1347 			return NULL;
1348 		}
1349 
1350 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1351 	}
1352 
1353 	return hpd_rx_offload_wq;
1354 }
1355 
1356 struct amdgpu_stutter_quirk {
1357 	u16 chip_vendor;
1358 	u16 chip_device;
1359 	u16 subsys_vendor;
1360 	u16 subsys_device;
1361 	u8 revision;
1362 };
1363 
1364 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1365 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1366 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1367 	{ 0, 0, 0, 0, 0 },
1368 };
1369 
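/* Check whether the device matches a quirk entry that requires stutter off. */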
1370 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1371 {
1372 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1373 
1374 	while (p && p->chip_device != 0) {
1375 		if (pdev->vendor == p->chip_vendor &&
1376 		    pdev->device == p->chip_device &&
1377 		    pdev->subsystem_vendor == p->subsys_vendor &&
1378 		    pdev->subsystem_device == p->subsys_device &&
1379 		    pdev->revision == p->revision) {
1380 			return true;
1381 		}
1382 		++p;
1383 	}
1384 	return false;
1385 }
1386 
1387 static int amdgpu_dm_init(struct amdgpu_device *adev)
1388 {
1389 	struct dc_init_data init_data;
1390 #ifdef CONFIG_DRM_AMD_DC_HDCP
1391 	struct dc_callback_init init_params;
1392 #endif
1393 	int r;
1394 
1395 	adev->dm.ddev = adev_to_drm(adev);
1396 	adev->dm.adev = adev;
1397 
1398 	/* Zero all the fields */
1399 	memset(&init_data, 0, sizeof(init_data));
1400 #ifdef CONFIG_DRM_AMD_DC_HDCP
1401 	memset(&init_params, 0, sizeof(init_params));
1402 #endif
1403 
1404 	mutex_init(&adev->dm.dc_lock);
1405 	mutex_init(&adev->dm.audio_lock);
1406 	spin_lock_init(&adev->dm.vblank_lock);
1407 
1408 	if (amdgpu_dm_irq_init(adev)) {
1409 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1410 		goto error;
1411 	}
1412 
1413 	init_data.asic_id.chip_family = adev->family;
1414 
1415 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1416 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1417 	init_data.asic_id.chip_id = adev->pdev->device;
1418 
1419 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1420 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1421 	init_data.asic_id.atombios_base_address =
1422 		adev->mode_info.atom_context->bios;
1423 
1424 	init_data.driver = adev;
1425 
1426 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1427 
1428 	if (!adev->dm.cgs_device) {
1429 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1430 		goto error;
1431 	}
1432 
1433 	init_data.cgs_device = adev->dm.cgs_device;
1434 
1435 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1436 
1437 	switch (adev->ip_versions[DCE_HWIP][0]) {
1438 	case IP_VERSION(2, 1, 0):
1439 		switch (adev->dm.dmcub_fw_version) {
1440 		case 0: /* development */
1441 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1442 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1443 			init_data.flags.disable_dmcu = false;
1444 			break;
1445 		default:
1446 			init_data.flags.disable_dmcu = true;
1447 		}
1448 		break;
1449 	case IP_VERSION(2, 0, 3):
1450 		init_data.flags.disable_dmcu = true;
1451 		break;
1452 	default:
1453 		break;
1454 	}
1455 
1456 	switch (adev->asic_type) {
1457 	case CHIP_CARRIZO:
1458 	case CHIP_STONEY:
1459 		init_data.flags.gpu_vm_support = true;
1460 		break;
1461 	default:
1462 		switch (adev->ip_versions[DCE_HWIP][0]) {
1463 		case IP_VERSION(1, 0, 0):
1464 		case IP_VERSION(1, 0, 1):
1465 			/* enable S/G on PCO and RV2 */
1466 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1467 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1468 				init_data.flags.gpu_vm_support = true;
1469 			break;
1470 		case IP_VERSION(2, 1, 0):
1471 		case IP_VERSION(3, 0, 1):
1472 		case IP_VERSION(3, 1, 2):
1473 		case IP_VERSION(3, 1, 3):
1474 		case IP_VERSION(3, 1, 5):
1475 		case IP_VERSION(3, 1, 6):
1476 			init_data.flags.gpu_vm_support = true;
1477 			break;
1478 		default:
1479 			break;
1480 		}
1481 		break;
1482 	}
1483 
1484 	if (init_data.flags.gpu_vm_support)
1485 		adev->mode_info.gpu_vm_support = true;
1486 
1487 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1488 		init_data.flags.fbc_support = true;
1489 
1490 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1491 		init_data.flags.multi_mon_pp_mclk_switch = true;
1492 
1493 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1494 		init_data.flags.disable_fractional_pwm = true;
1495 
1496 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1497 		init_data.flags.edp_no_power_sequencing = true;
1498 
1499 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1500 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1501 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1502 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1503 
1504 	init_data.flags.seamless_boot_edp_requested = false;
1505 
1506 	if (check_seamless_boot_capability(adev)) {
1507 		init_data.flags.seamless_boot_edp_requested = true;
1508 		init_data.flags.allow_seamless_boot_optimization = true;
1509 		DRM_INFO("Seamless boot condition check passed\n");
1510 	}
1511 
1512 	INIT_LIST_HEAD(&adev->dm.da_list);
1513 	/* Display Core create. */
1514 	adev->dm.dc = dc_create(&init_data);
1515 
1516 	if (adev->dm.dc) {
1517 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1518 	} else {
1519 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1520 		goto error;
1521 	}
1522 
1523 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1524 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1525 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1526 	}
1527 
1528 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1529 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1530 	if (dm_should_disable_stutter(adev->pdev))
1531 		adev->dm.dc->debug.disable_stutter = true;
1532 
1533 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1534 		adev->dm.dc->debug.disable_stutter = true;
1535 
1536 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1537 		adev->dm.dc->debug.disable_dsc = true;
1538 		adev->dm.dc->debug.disable_dsc_edp = true;
1539 	}
1540 
1541 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1542 		adev->dm.dc->debug.disable_clock_gate = true;
1543 
1544 	r = dm_dmub_hw_init(adev);
1545 	if (r) {
1546 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1547 		goto error;
1548 	}
1549 
1550 	dc_hardware_init(adev->dm.dc);
1551 
1552 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1553 	if (!adev->dm.hpd_rx_offload_wq) {
1554 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1555 		goto error;
1556 	}
1557 
1558 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1559 		struct dc_phy_addr_space_config pa_config;
1560 
1561 		mmhub_read_system_context(adev, &pa_config);
1562 
1563 		/* Call the DC init_memory func */
1564 		dc_setup_system_context(adev->dm.dc, &pa_config);
1565 	}
1566 
1567 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1568 	if (!adev->dm.freesync_module) {
1569 		DRM_ERROR(
1570 		"amdgpu: failed to initialize freesync_module.\n");
1571 	} else
1572 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1573 				adev->dm.freesync_module);
1574 
1575 	amdgpu_dm_init_color_mod();
1576 
1577 	if (adev->dm.dc->caps.max_links > 0) {
1578 		adev->dm.vblank_control_workqueue =
1579 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1580 		if (!adev->dm.vblank_control_workqueue)
1581 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1582 	}
1583 
1584 #ifdef CONFIG_DRM_AMD_DC_HDCP
1585 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1586 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1587 
1588 		if (!adev->dm.hdcp_workqueue)
1589 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1590 		else
1591 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1592 
1593 		dc_init_callbacks(adev->dm.dc, &init_params);
1594 	}
1595 #endif
1596 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1597 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1598 #endif
1599 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1600 		init_completion(&adev->dm.dmub_aux_transfer_done);
1601 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1602 		if (!adev->dm.dmub_notify) {
1603 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1604 			goto error;
1605 		}
1606 
1607 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1608 		if (!adev->dm.delayed_hpd_wq) {
1609 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1610 			goto error;
1611 		}
1612 
1613 		amdgpu_dm_outbox_init(adev);
1614 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1615 			dmub_aux_setconfig_callback, false)) {
1616 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1617 			goto error;
1618 		}
1619 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1620 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1621 			goto error;
1622 		}
1623 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1624 			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
1625 			goto error;
1626 		}
1627 	}
1628 
1629 	if (amdgpu_dm_initialize_drm_device(adev)) {
1630 		DRM_ERROR(
1631 		"amdgpu: failed to initialize sw for display support.\n");
1632 		goto error;
1633 	}
1634 
1635 	/* create fake encoders for MST */
1636 	dm_dp_create_fake_mst_encoders(adev);
1637 
1638 	/* TODO: Add_display_info? */
1639 
1640 	/* TODO use dynamic cursor width */
1641 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1642 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1643 
1644 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1645 		DRM_ERROR(
1646 		"amdgpu: failed to initialize vblank support.\n");
1647 		goto error;
1648 	}
1649 
1650 
1651 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1652 
1653 	return 0;
1654 error:
1655 	amdgpu_dm_fini(adev);
1656 
1657 	return -EINVAL;
1658 }
1659 
1660 static int amdgpu_dm_early_fini(void *handle)
1661 {
1662 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1663 
1664 	amdgpu_dm_audio_fini(adev);
1665 
1666 	return 0;
1667 }
1668 
1669 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1670 {
1671 	int i;
1672 
1673 	if (adev->dm.vblank_control_workqueue) {
1674 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1675 		adev->dm.vblank_control_workqueue = NULL;
1676 	}
1677 
1678 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1679 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1680 	}
1681 
1682 	amdgpu_dm_destroy_drm_device(&adev->dm);
1683 
1684 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1685 	if (adev->dm.crc_rd_wrk) {
1686 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1687 		kfree(adev->dm.crc_rd_wrk);
1688 		adev->dm.crc_rd_wrk = NULL;
1689 	}
1690 #endif
1691 #ifdef CONFIG_DRM_AMD_DC_HDCP
1692 	if (adev->dm.hdcp_workqueue) {
1693 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1694 		adev->dm.hdcp_workqueue = NULL;
1695 	}
1696 
1697 	if (adev->dm.dc)
1698 		dc_deinit_callbacks(adev->dm.dc);
1699 #endif
1700 
1701 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1702 
1703 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1704 		kfree(adev->dm.dmub_notify);
1705 		adev->dm.dmub_notify = NULL;
1706 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1707 		adev->dm.delayed_hpd_wq = NULL;
1708 	}
1709 
1710 	if (adev->dm.dmub_bo)
1711 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1712 				      &adev->dm.dmub_bo_gpu_addr,
1713 				      &adev->dm.dmub_bo_cpu_addr);
1714 
1715 	if (adev->dm.hpd_rx_offload_wq) {
1716 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1717 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1718 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1719 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1720 			}
1721 		}
1722 
1723 		kfree(adev->dm.hpd_rx_offload_wq);
1724 		adev->dm.hpd_rx_offload_wq = NULL;
1725 	}
1726 
1727 	/* DC Destroy TODO: Replace destroy DAL */
1728 	if (adev->dm.dc)
1729 		dc_destroy(&adev->dm.dc);
1730 	/*
1731 	 * TODO: pageflip, vlank interrupt
1732 	 *
1733 	 * amdgpu_dm_irq_fini(adev);
1734 	 */
1735 
1736 	if (adev->dm.cgs_device) {
1737 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1738 		adev->dm.cgs_device = NULL;
1739 	}
1740 	if (adev->dm.freesync_module) {
1741 		mod_freesync_destroy(adev->dm.freesync_module);
1742 		adev->dm.freesync_module = NULL;
1743 	}
1744 
1745 	mutex_destroy(&adev->dm.audio_lock);
1746 	mutex_destroy(&adev->dm.dc_lock);
1747 
1748 	return;
1749 }
1750 
1751 static int load_dmcu_fw(struct amdgpu_device *adev)
1752 {
1753 	const char *fw_name_dmcu = NULL;
1754 	int r;
1755 	const struct dmcu_firmware_header_v1_0 *hdr;
1756 
1757 	switch (adev->asic_type) {
1758 #if defined(CONFIG_DRM_AMD_DC_SI)
1759 	case CHIP_TAHITI:
1760 	case CHIP_PITCAIRN:
1761 	case CHIP_VERDE:
1762 	case CHIP_OLAND:
1763 #endif
1764 	case CHIP_BONAIRE:
1765 	case CHIP_HAWAII:
1766 	case CHIP_KAVERI:
1767 	case CHIP_KABINI:
1768 	case CHIP_MULLINS:
1769 	case CHIP_TONGA:
1770 	case CHIP_FIJI:
1771 	case CHIP_CARRIZO:
1772 	case CHIP_STONEY:
1773 	case CHIP_POLARIS11:
1774 	case CHIP_POLARIS10:
1775 	case CHIP_POLARIS12:
1776 	case CHIP_VEGAM:
1777 	case CHIP_VEGA10:
1778 	case CHIP_VEGA12:
1779 	case CHIP_VEGA20:
1780 		return 0;
1781 	case CHIP_NAVI12:
1782 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1783 		break;
1784 	case CHIP_RAVEN:
1785 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1786 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1787 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1788 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1789 		else
1790 			return 0;
1791 		break;
1792 	default:
1793 		switch (adev->ip_versions[DCE_HWIP][0]) {
1794 		case IP_VERSION(2, 0, 2):
1795 		case IP_VERSION(2, 0, 3):
1796 		case IP_VERSION(2, 0, 0):
1797 		case IP_VERSION(2, 1, 0):
1798 		case IP_VERSION(3, 0, 0):
1799 		case IP_VERSION(3, 0, 2):
1800 		case IP_VERSION(3, 0, 3):
1801 		case IP_VERSION(3, 0, 1):
1802 		case IP_VERSION(3, 1, 2):
1803 		case IP_VERSION(3, 1, 3):
1804 		case IP_VERSION(3, 1, 5):
1805 		case IP_VERSION(3, 1, 6):
1806 			return 0;
1807 		default:
1808 			break;
1809 		}
1810 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1811 		return -EINVAL;
1812 	}
1813 
1814 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1815 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1816 		return 0;
1817 	}
1818 
1819 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1820 	if (r == -ENOENT) {
1821 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1822 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1823 		adev->dm.fw_dmcu = NULL;
1824 		return 0;
1825 	}
1826 	if (r) {
1827 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1828 			fw_name_dmcu);
1829 		return r;
1830 	}
1831 
1832 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1833 	if (r) {
1834 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1835 			fw_name_dmcu);
1836 		release_firmware(adev->dm.fw_dmcu);
1837 		adev->dm.fw_dmcu = NULL;
1838 		return r;
1839 	}
1840 
1841 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
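	/* Descriptive note: the DMCU blob carries both the ERAM image and the
	 * interrupt vector region (INTV), so it is registered below as two
	 * ucode entries backed by the same firmware file, each accounted for
	 * (page aligned) in the total firmware size.
	 */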
1842 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1843 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1844 	adev->firmware.fw_size +=
1845 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1846 
1847 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1848 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1849 	adev->firmware.fw_size +=
1850 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1851 
1852 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1853 
1854 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1855 
1856 	return 0;
1857 }
1858 
1859 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1860 {
1861 	struct amdgpu_device *adev = ctx;
1862 
1863 	return dm_read_reg(adev->dm.dc->ctx, address);
1864 }
1865 
1866 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1867 				     uint32_t value)
1868 {
1869 	struct amdgpu_device *adev = ctx;
1870 
1871 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1872 }
1873 
1874 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1875 {
1876 	struct dmub_srv_create_params create_params;
1877 	struct dmub_srv_region_params region_params;
1878 	struct dmub_srv_region_info region_info;
1879 	struct dmub_srv_fb_params fb_params;
1880 	struct dmub_srv_fb_info *fb_info;
1881 	struct dmub_srv *dmub_srv;
1882 	const struct dmcub_firmware_header_v1_0 *hdr;
1883 	const char *fw_name_dmub;
1884 	enum dmub_asic dmub_asic;
1885 	enum dmub_status status;
1886 	int r;
1887 
1888 	switch (adev->ip_versions[DCE_HWIP][0]) {
1889 	case IP_VERSION(2, 1, 0):
1890 		dmub_asic = DMUB_ASIC_DCN21;
1891 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1892 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1893 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1894 		break;
1895 	case IP_VERSION(3, 0, 0):
1896 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1897 			dmub_asic = DMUB_ASIC_DCN30;
1898 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1899 		} else {
1900 			dmub_asic = DMUB_ASIC_DCN30;
1901 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1902 		}
1903 		break;
1904 	case IP_VERSION(3, 0, 1):
1905 		dmub_asic = DMUB_ASIC_DCN301;
1906 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1907 		break;
1908 	case IP_VERSION(3, 0, 2):
1909 		dmub_asic = DMUB_ASIC_DCN302;
1910 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1911 		break;
1912 	case IP_VERSION(3, 0, 3):
1913 		dmub_asic = DMUB_ASIC_DCN303;
1914 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1915 		break;
1916 	case IP_VERSION(3, 1, 2):
1917 	case IP_VERSION(3, 1, 3):
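		/* Yellow Carp B0 silicon is treated as a distinct DMUB ASIC
		 * variant, but both revisions use the same firmware image.
		 */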
1918 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1919 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1920 		break;
1921 	case IP_VERSION(3, 1, 5):
1922 		dmub_asic = DMUB_ASIC_DCN315;
1923 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1924 		break;
1925 	case IP_VERSION(3, 1, 6):
1926 		dmub_asic = DMUB_ASIC_DCN316;
1927 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1928 		break;
1929 	default:
1930 		/* ASIC doesn't support DMUB. */
1931 		return 0;
1932 	}
1933 
1934 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1935 	if (r) {
1936 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1937 		return 0;
1938 	}
1939 
1940 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1941 	if (r) {
1942 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1943 		return 0;
1944 	}
1945 
1946 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1947 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1948 
1949 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1950 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1951 			AMDGPU_UCODE_ID_DMCUB;
1952 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1953 			adev->dm.dmub_fw;
1954 		adev->firmware.fw_size +=
1955 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1956 
1957 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1958 			 adev->dm.dmcub_fw_version);
1959 	}
1960 
1961 
1962 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1963 	dmub_srv = adev->dm.dmub_srv;
1964 
1965 	if (!dmub_srv) {
1966 		DRM_ERROR("Failed to allocate DMUB service!\n");
1967 		return -ENOMEM;
1968 	}
1969 
1970 	memset(&create_params, 0, sizeof(create_params));
1971 	create_params.user_ctx = adev;
1972 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1973 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1974 	create_params.asic = dmub_asic;
1975 
1976 	/* Create the DMUB service. */
1977 	status = dmub_srv_create(dmub_srv, &create_params);
1978 	if (status != DMUB_STATUS_OK) {
1979 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1980 		return -EINVAL;
1981 	}
1982 
1983 	/* Calculate the size of all the regions for the DMUB service. */
1984 	memset(&region_params, 0, sizeof(region_params));
1985 
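	/* Descriptive note: the signed DMUB image wraps the instruction
	 * constants in a PSP header and footer, so both are subtracted from
	 * the region size here and the header is skipped when locating
	 * fw_inst_const below.
	 */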
1986 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1987 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1988 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1989 	region_params.vbios_size = adev->bios_size;
1990 	region_params.fw_bss_data = region_params.bss_data_size ?
1991 		adev->dm.dmub_fw->data +
1992 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1993 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1994 	region_params.fw_inst_const =
1995 		adev->dm.dmub_fw->data +
1996 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1997 		PSP_HEADER_BYTES;
1998 
1999 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2000 					   &region_info);
2001 
2002 	if (status != DMUB_STATUS_OK) {
2003 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2004 		return -EINVAL;
2005 	}
2006 
2007 	/*
2008 	 * Allocate a framebuffer based on the total size of all the regions.
2009 	 * TODO: Move this into GART.
2010 	 */
2011 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2012 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2013 				    &adev->dm.dmub_bo_gpu_addr,
2014 				    &adev->dm.dmub_bo_cpu_addr);
2015 	if (r)
2016 		return r;
2017 
2018 	/* Rebase the regions on the framebuffer address. */
2019 	memset(&fb_params, 0, sizeof(fb_params));
2020 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2021 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2022 	fb_params.region_info = &region_info;
2023 
2024 	adev->dm.dmub_fb_info =
2025 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2026 	fb_info = adev->dm.dmub_fb_info;
2027 
2028 	if (!fb_info) {
2029 		DRM_ERROR(
2030 			"Failed to allocate framebuffer info for DMUB service!\n");
2031 		return -ENOMEM;
2032 	}
2033 
2034 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2035 	if (status != DMUB_STATUS_OK) {
2036 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2037 		return -EINVAL;
2038 	}
2039 
2040 	return 0;
2041 }
2042 
2043 static int dm_sw_init(void *handle)
2044 {
2045 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2046 	int r;
2047 
2048 	r = dm_dmub_sw_init(adev);
2049 	if (r)
2050 		return r;
2051 
2052 	return load_dmcu_fw(adev);
2053 }
2054 
2055 static int dm_sw_fini(void *handle)
2056 {
2057 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2058 
2059 	kfree(adev->dm.dmub_fb_info);
2060 	adev->dm.dmub_fb_info = NULL;
2061 
2062 	if (adev->dm.dmub_srv) {
2063 		dmub_srv_destroy(adev->dm.dmub_srv);
2064 		adev->dm.dmub_srv = NULL;
2065 	}
2066 
2067 	release_firmware(adev->dm.dmub_fw);
2068 	adev->dm.dmub_fw = NULL;
2069 
2070 	release_firmware(adev->dm.fw_dmcu);
2071 	adev->dm.fw_dmcu = NULL;
2072 
2073 	return 0;
2074 }
2075 
2076 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2077 {
2078 	struct amdgpu_dm_connector *aconnector;
2079 	struct drm_connector *connector;
2080 	struct drm_connector_list_iter iter;
2081 	int ret = 0;
2082 
2083 	drm_connector_list_iter_begin(dev, &iter);
2084 	drm_for_each_connector_iter(connector, &iter) {
2085 		aconnector = to_amdgpu_dm_connector(connector);
2086 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2087 		    aconnector->mst_mgr.aux) {
2088 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2089 					 aconnector,
2090 					 aconnector->base.base.id);
2091 
2092 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2093 			if (ret < 0) {
2094 				DRM_ERROR("DM_MST: Failed to start MST\n");
2095 				aconnector->dc_link->type =
2096 					dc_connection_single;
2097 				break;
2098 			}
2099 		}
2100 	}
2101 	drm_connector_list_iter_end(&iter);
2102 
2103 	return ret;
2104 }
2105 
2106 static int dm_late_init(void *handle)
2107 {
2108 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2109 
2110 	struct dmcu_iram_parameters params;
2111 	unsigned int linear_lut[16];
2112 	int i;
2113 	struct dmcu *dmcu = NULL;
2114 
2115 	dmcu = adev->dm.dc->res_pool->dmcu;
2116 
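	/* Build a 16-entry linear backlight LUT spanning the full 16-bit range:
	 * entry i = 0xFFFF * i / 15, so entry 0 is 0x0000 and entry 15 is 0xFFFF.
	 */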
2117 	for (i = 0; i < 16; i++)
2118 		linear_lut[i] = 0xFFFF * i / 15;
2119 
2120 	params.set = 0;
2121 	params.backlight_ramping_override = false;
2122 	params.backlight_ramping_start = 0xCCCC;
2123 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2124 	params.backlight_lut_array_size = 16;
2125 	params.backlight_lut_array = linear_lut;
2126 
2127 	/* Min backlight level after ABM reduction; don't allow below 1%.
2128 	 * 0xFFFF * 0.01 = 0x28F
2129 	 */
2130 	params.min_abm_backlight = 0x28F;
2131 	/* In the case where ABM is implemented on dmcub,
2132 	 * the dmcu object will be null.
2133 	 * ABM 2.4 and up are implemented on dmcub.
2134 	 */
2135 	if (dmcu) {
2136 		if (!dmcu_load_iram(dmcu, params))
2137 			return -EINVAL;
2138 	} else if (adev->dm.dc->ctx->dmub_srv) {
2139 		struct dc_link *edp_links[MAX_NUM_EDP];
2140 		int edp_num;
2141 
2142 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2143 		for (i = 0; i < edp_num; i++) {
2144 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2145 				return -EINVAL;
2146 		}
2147 	}
2148 
2149 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2150 }
2151 
2152 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2153 {
2154 	struct amdgpu_dm_connector *aconnector;
2155 	struct drm_connector *connector;
2156 	struct drm_connector_list_iter iter;
2157 	struct drm_dp_mst_topology_mgr *mgr;
2158 	int ret;
2159 	bool need_hotplug = false;
2160 
2161 	drm_connector_list_iter_begin(dev, &iter);
2162 	drm_for_each_connector_iter(connector, &iter) {
2163 		aconnector = to_amdgpu_dm_connector(connector);
2164 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2165 		    aconnector->mst_port)
2166 			continue;
2167 
2168 		mgr = &aconnector->mst_mgr;
2169 
2170 		if (suspend) {
2171 			drm_dp_mst_topology_mgr_suspend(mgr);
2172 		} else {
2173 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2174 			if (ret < 0) {
2175 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2176 				need_hotplug = true;
2177 			}
2178 		}
2179 	}
2180 	drm_connector_list_iter_end(&iter);
2181 
2182 	if (need_hotplug)
2183 		drm_kms_helper_hotplug_event(dev);
2184 }
2185 
2186 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2187 {
2188 	int ret = 0;
2189 
2190 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2191 	 * on the Windows driver dc implementation.
2192 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2193 	 * should be passed to smu during boot up and on resume from s3.
2194 	 * At boot up, dc calculates the dcn watermark clock settings within
2195 	 * dc_create (dcn20_resource_construct) and then calls the pplib
2196 	 * functions below to pass the settings to smu:
2197 	 * smu_set_watermarks_for_clock_ranges
2198 	 * smu_set_watermarks_table
2199 	 * navi10_set_watermarks_table
2200 	 * smu_write_watermarks_table
2201 	 *
2202 	 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
2203 	 * dc implements a different flow for the Windows driver:
2204 	 * dc_hardware_init / dc_set_power_state
2205 	 * dcn10_init_hw
2206 	 * notify_wm_ranges
2207 	 * set_wm_ranges
2208 	 * -- Linux
2209 	 * smu_set_watermarks_for_clock_ranges
2210 	 * renoir_set_watermarks_table
2211 	 * smu_write_watermarks_table
2212 	 *
2213 	 * For Linux,
2214 	 * dc_hardware_init -> amdgpu_dm_init
2215 	 * dc_set_power_state --> dm_resume
2216 	 *
2217 	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
2218 	 *
2219 	 */
2220 	switch (adev->ip_versions[DCE_HWIP][0]) {
2221 	case IP_VERSION(2, 0, 2):
2222 	case IP_VERSION(2, 0, 0):
2223 		break;
2224 	default:
2225 		return 0;
2226 	}
2227 
2228 	ret = amdgpu_dpm_write_watermarks_table(adev);
2229 	if (ret) {
2230 		DRM_ERROR("Failed to update WMTABLE!\n");
2231 		return ret;
2232 	}
2233 
2234 	return 0;
2235 }
2236 
2237 /**
2238  * dm_hw_init() - Initialize DC device
2239  * @handle: The base driver device containing the amdgpu_dm device.
2240  *
2241  * Initialize the &struct amdgpu_display_manager device. This involves calling
2242  * the initializers of each DM component, then populating the struct with them.
2243  *
2244  * Although the function implies hardware initialization, both hardware and
2245  * software are initialized here. Splitting them out to their relevant init
2246  * hooks is a future TODO item.
2247  *
2248  * Some notable things that are initialized here:
2249  *
2250  * - Display Core, both software and hardware
2251  * - DC modules that we need (freesync and color management)
2252  * - DRM software states
2253  * - Interrupt sources and handlers
2254  * - Vblank support
2255  * - Debug FS entries, if enabled
2256  */
2257 static int dm_hw_init(void *handle)
2258 {
2259 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2260 	/* Create DAL display manager */
2261 	amdgpu_dm_init(adev);
2262 	amdgpu_dm_hpd_init(adev);
2263 
2264 	return 0;
2265 }
2266 
2267 /**
2268  * dm_hw_fini() - Teardown DC device
2269  * @handle: The base driver device containing the amdgpu_dm device.
2270  *
2271  * Teardown components within &struct amdgpu_display_manager that require
2272  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2273  * were loaded. Also flush IRQ workqueues and disable them.
2274  */
2275 static int dm_hw_fini(void *handle)
2276 {
2277 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2278 
2279 	amdgpu_dm_hpd_fini(adev);
2280 
2281 	amdgpu_dm_irq_fini(adev);
2282 	amdgpu_dm_fini(adev);
2283 	return 0;
2284 }
2285 
2286 
2287 static int dm_enable_vblank(struct drm_crtc *crtc);
2288 static void dm_disable_vblank(struct drm_crtc *crtc);
2289 
2290 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2291 				 struct dc_state *state, bool enable)
2292 {
2293 	enum dc_irq_source irq_source;
2294 	struct amdgpu_crtc *acrtc;
2295 	int rc = -EBUSY;
2296 	int i = 0;
2297 
2298 	for (i = 0; i < state->stream_count; i++) {
2299 		acrtc = get_crtc_by_otg_inst(
2300 				adev, state->stream_status[i].primary_otg_inst);
2301 
2302 		if (acrtc && state->stream_status[i].plane_count != 0) {
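			/* Each OTG instance has its own pageflip IRQ source;
			 * offset from the base pflip IRQ by the OTG instance index.
			 */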
2303 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2304 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2305 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2306 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2307 			if (rc)
2308 				DRM_WARN("Failed to %s pflip interrupts\n",
2309 					 enable ? "enable" : "disable");
2310 
2311 			if (enable) {
2312 				rc = dm_enable_vblank(&acrtc->base);
2313 				if (rc)
2314 					DRM_WARN("Failed to enable vblank interrupts\n");
2315 			} else {
2316 				dm_disable_vblank(&acrtc->base);
2317 			}
2318 
2319 		}
2320 	}
2321 
2322 }
2323 
2324 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2325 {
2326 	struct dc_state *context = NULL;
2327 	enum dc_status res = DC_ERROR_UNEXPECTED;
2328 	int i;
2329 	struct dc_stream_state *del_streams[MAX_PIPES];
2330 	int del_streams_count = 0;
2331 
2332 	memset(del_streams, 0, sizeof(del_streams));
2333 
2334 	context = dc_create_state(dc);
2335 	if (context == NULL)
2336 		goto context_alloc_fail;
2337 
2338 	dc_resource_state_copy_construct_current(dc, context);
2339 
2340 	/* First remove from context all streams */
2341 	for (i = 0; i < context->stream_count; i++) {
2342 		struct dc_stream_state *stream = context->streams[i];
2343 
2344 		del_streams[del_streams_count++] = stream;
2345 	}
2346 
2347 	/* Remove all planes for removed streams and then remove the streams */
2348 	for (i = 0; i < del_streams_count; i++) {
2349 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2350 			res = DC_FAIL_DETACH_SURFACES;
2351 			goto fail;
2352 		}
2353 
2354 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2355 		if (res != DC_OK)
2356 			goto fail;
2357 	}
2358 
2359 	res = dc_commit_state(dc, context);
2360 
2361 fail:
2362 	dc_release_state(context);
2363 
2364 context_alloc_fail:
2365 	return res;
2366 }
2367 
2368 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2369 {
2370 	int i;
2371 
2372 	if (dm->hpd_rx_offload_wq) {
2373 		for (i = 0; i < dm->dc->caps.max_links; i++)
2374 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2375 	}
2376 }
2377 
2378 static int dm_suspend(void *handle)
2379 {
2380 	struct amdgpu_device *adev = handle;
2381 	struct amdgpu_display_manager *dm = &adev->dm;
2382 	int ret = 0;
2383 
2384 	if (amdgpu_in_reset(adev)) {
2385 		mutex_lock(&dm->dc_lock);
2386 
2387 		dc_allow_idle_optimizations(adev->dm.dc, false);
2388 
2389 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2390 
2391 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2392 
2393 		amdgpu_dm_commit_zero_streams(dm->dc);
2394 
2395 		amdgpu_dm_irq_suspend(adev);
2396 
2397 		hpd_rx_irq_work_suspend(dm);
2398 
2399 		return ret;
2400 	}
2401 
2402 	WARN_ON(adev->dm.cached_state);
2403 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2404 
2405 	s3_handle_mst(adev_to_drm(adev), true);
2406 
2407 	amdgpu_dm_irq_suspend(adev);
2408 
2409 	hpd_rx_irq_work_suspend(dm);
2410 
2411 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2412 
2413 	return 0;
2414 }
2415 
2416 struct amdgpu_dm_connector *
2417 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2418 					     struct drm_crtc *crtc)
2419 {
2420 	uint32_t i;
2421 	struct drm_connector_state *new_con_state;
2422 	struct drm_connector *connector;
2423 	struct drm_crtc *crtc_from_state;
2424 
2425 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2426 		crtc_from_state = new_con_state->crtc;
2427 
2428 		if (crtc_from_state == crtc)
2429 			return to_amdgpu_dm_connector(connector);
2430 	}
2431 
2432 	return NULL;
2433 }
2434 
2435 static void emulated_link_detect(struct dc_link *link)
2436 {
2437 	struct dc_sink_init_data sink_init_data = { 0 };
2438 	struct display_sink_capability sink_caps = { 0 };
2439 	enum dc_edid_status edid_status;
2440 	struct dc_context *dc_ctx = link->ctx;
2441 	struct dc_sink *sink = NULL;
2442 	struct dc_sink *prev_sink = NULL;
2443 
2444 	link->type = dc_connection_none;
2445 	prev_sink = link->local_sink;
2446 
2447 	if (prev_sink)
2448 		dc_sink_release(prev_sink);
2449 
2450 	switch (link->connector_signal) {
2451 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2452 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2453 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2454 		break;
2455 	}
2456 
2457 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2458 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2459 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2460 		break;
2461 	}
2462 
2463 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2464 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2465 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2466 		break;
2467 	}
2468 
2469 	case SIGNAL_TYPE_LVDS: {
2470 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2471 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2472 		break;
2473 	}
2474 
2475 	case SIGNAL_TYPE_EDP: {
2476 		sink_caps.transaction_type =
2477 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2478 		sink_caps.signal = SIGNAL_TYPE_EDP;
2479 		break;
2480 	}
2481 
2482 	case SIGNAL_TYPE_DISPLAY_PORT: {
2483 		sink_caps.transaction_type =
2484 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
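		/* Note: the emulated DP path reports a virtual signal type,
		 * presumably because there is no physical DP sink to train against.
		 */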
2485 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2486 		break;
2487 	}
2488 
2489 	default:
2490 		DC_ERROR("Invalid connector type! signal:%d\n",
2491 			link->connector_signal);
2492 		return;
2493 	}
2494 
2495 	sink_init_data.link = link;
2496 	sink_init_data.sink_signal = sink_caps.signal;
2497 
2498 	sink = dc_sink_create(&sink_init_data);
2499 	if (!sink) {
2500 		DC_ERROR("Failed to create sink!\n");
2501 		return;
2502 	}
2503 
2504 	/* dc_sink_create returns a new reference */
2505 	link->local_sink = sink;
2506 
2507 	edid_status = dm_helpers_read_local_edid(
2508 			link->ctx,
2509 			link,
2510 			sink);
2511 
2512 	if (edid_status != EDID_OK)
2513 		DC_ERROR("Failed to read EDID\n");
2514 
2515 }
2516 
2517 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2518 				     struct amdgpu_display_manager *dm)
2519 {
2520 	struct {
2521 		struct dc_surface_update surface_updates[MAX_SURFACES];
2522 		struct dc_plane_info plane_infos[MAX_SURFACES];
2523 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2524 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2525 		struct dc_stream_update stream_update;
2526 	} *bundle;
2527 	int k, m;
2528 
2529 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2530 
2531 	if (!bundle) {
2532 		dm_error("Failed to allocate update bundle\n");
2533 		goto cleanup;
2534 	}
2535 
2536 	for (k = 0; k < dc_state->stream_count; k++) {
2537 		bundle->stream_update.stream = dc_state->streams[k];
2538 
2539 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2540 			bundle->surface_updates[m].surface =
2541 				dc_state->stream_status->plane_states[m];
2542 			bundle->surface_updates[m].surface->force_full_update =
2543 				true;
2544 		}
2545 		dc_commit_updates_for_stream(
2546 			dm->dc, bundle->surface_updates,
2547 			dc_state->stream_status->plane_count,
2548 			dc_state->streams[k], &bundle->stream_update, dc_state);
2549 	}
2550 
2551 cleanup:
2552 	kfree(bundle);
2553 
2554 	return;
2555 }
2556 
2557 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2558 {
2559 	struct dc_stream_state *stream_state;
2560 	struct amdgpu_dm_connector *aconnector = link->priv;
2561 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2562 	struct dc_stream_update stream_update;
2563 	bool dpms_off = true;
2564 
2565 	memset(&stream_update, 0, sizeof(stream_update));
2566 	stream_update.dpms_off = &dpms_off;
2567 
2568 	mutex_lock(&adev->dm.dc_lock);
2569 	stream_state = dc_stream_find_from_link(link);
2570 
2571 	if (stream_state == NULL) {
2572 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2573 		mutex_unlock(&adev->dm.dc_lock);
2574 		return;
2575 	}
2576 
2577 	stream_update.stream = stream_state;
2578 	acrtc_state->force_dpms_off = true;
2579 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2580 				     stream_state, &stream_update,
2581 				     stream_state->ctx->dc->current_state);
2582 	mutex_unlock(&adev->dm.dc_lock);
2583 }
2584 
2585 static int dm_resume(void *handle)
2586 {
2587 	struct amdgpu_device *adev = handle;
2588 	struct drm_device *ddev = adev_to_drm(adev);
2589 	struct amdgpu_display_manager *dm = &adev->dm;
2590 	struct amdgpu_dm_connector *aconnector;
2591 	struct drm_connector *connector;
2592 	struct drm_connector_list_iter iter;
2593 	struct drm_crtc *crtc;
2594 	struct drm_crtc_state *new_crtc_state;
2595 	struct dm_crtc_state *dm_new_crtc_state;
2596 	struct drm_plane *plane;
2597 	struct drm_plane_state *new_plane_state;
2598 	struct dm_plane_state *dm_new_plane_state;
2599 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2600 	enum dc_connection_type new_connection_type = dc_connection_none;
2601 	struct dc_state *dc_state;
2602 	int i, r, j;
2603 
2604 	if (amdgpu_in_reset(adev)) {
2605 		dc_state = dm->cached_dc_state;
2606 
2607 		/*
2608 		 * The dc->current_state is backed up into dm->cached_dc_state
2609 		 * before we commit 0 streams.
2610 		 *
2611 		 * DC will clear link encoder assignments on the real state
2612 		 * but the changes won't propagate over to the copy we made
2613 		 * before the 0 streams commit.
2614 		 *
2615 		 * DC expects that link encoder assignments are *not* valid
2616 		 * when committing a state, so as a workaround we can copy
2617 		 * off of the current state.
2618 		 *
2619 		 * We lose the previous assignments, but we had already
2620 		 * committed 0 streams anyway.
2621 		 */
2622 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2623 
2624 		if (dc_enable_dmub_notifications(adev->dm.dc))
2625 			amdgpu_dm_outbox_init(adev);
2626 
2627 		r = dm_dmub_hw_init(adev);
2628 		if (r)
2629 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2630 
2631 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2632 		dc_resume(dm->dc);
2633 
2634 		amdgpu_dm_irq_resume_early(adev);
2635 
2636 		for (i = 0; i < dc_state->stream_count; i++) {
2637 			dc_state->streams[i]->mode_changed = true;
2638 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2639 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2640 					= 0xffffffff;
2641 			}
2642 		}
2643 
2644 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2645 
2646 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2647 
2648 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2649 
2650 		dc_release_state(dm->cached_dc_state);
2651 		dm->cached_dc_state = NULL;
2652 
2653 		amdgpu_dm_irq_resume_late(adev);
2654 
2655 		mutex_unlock(&dm->dc_lock);
2656 
2657 		return 0;
2658 	}
2659 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2660 	dc_release_state(dm_state->context);
2661 	dm_state->context = dc_create_state(dm->dc);
2662 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2663 	dc_resource_state_construct(dm->dc, dm_state->context);
2664 
2665 	/* Re-enable outbox interrupts for DPIA. */
2666 	if (dc_enable_dmub_notifications(adev->dm.dc))
2667 		amdgpu_dm_outbox_init(adev);
2668 
2669 	/* Before powering on DC we need to re-initialize DMUB. */
2670 	dm_dmub_hw_resume(adev);
2671 
2672 	/* power on hardware */
2673 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2674 
2675 	/* program HPD filter */
2676 	dc_resume(dm->dc);
2677 
2678 	/*
2679 	 * Enable HPD Rx IRQ early; this should be done before the mode set, as
2680 	 * short pulse interrupts are used for MST
2681 	 */
2682 	amdgpu_dm_irq_resume_early(adev);
2683 
2684 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2685 	s3_handle_mst(ddev, false);
2686 
2687 	/* Do detection */
2688 	drm_connector_list_iter_begin(ddev, &iter);
2689 	drm_for_each_connector_iter(connector, &iter) {
2690 		aconnector = to_amdgpu_dm_connector(connector);
2691 
2692 		/*
2693 		 * this is the case when traversing through already created
2694 		 * MST connectors, should be skipped
2695 		 */
2696 		if (aconnector->dc_link &&
2697 		    aconnector->dc_link->type == dc_connection_mst_branch)
2698 			continue;
2699 
2700 		mutex_lock(&aconnector->hpd_lock);
2701 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2702 			DRM_ERROR("KMS: Failed to detect connector\n");
2703 
2704 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2705 			emulated_link_detect(aconnector->dc_link);
2706 		else
2707 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2708 
2709 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2710 			aconnector->fake_enable = false;
2711 
2712 		if (aconnector->dc_sink)
2713 			dc_sink_release(aconnector->dc_sink);
2714 		aconnector->dc_sink = NULL;
2715 		amdgpu_dm_update_connector_after_detect(aconnector);
2716 		mutex_unlock(&aconnector->hpd_lock);
2717 	}
2718 	drm_connector_list_iter_end(&iter);
2719 
2720 	/* Force mode set in atomic commit */
2721 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2722 		new_crtc_state->active_changed = true;
2723 
2724 	/*
2725 	 * atomic_check is expected to create the dc states. We need to release
2726 	 * them here, since they were duplicated as part of the suspend
2727 	 * procedure.
2728 	 */
2729 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2730 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2731 		if (dm_new_crtc_state->stream) {
2732 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2733 			dc_stream_release(dm_new_crtc_state->stream);
2734 			dm_new_crtc_state->stream = NULL;
2735 		}
2736 	}
2737 
2738 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2739 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2740 		if (dm_new_plane_state->dc_state) {
2741 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2742 			dc_plane_state_release(dm_new_plane_state->dc_state);
2743 			dm_new_plane_state->dc_state = NULL;
2744 		}
2745 	}
2746 
2747 	drm_atomic_helper_resume(ddev, dm->cached_state);
2748 
2749 	dm->cached_state = NULL;
2750 
2751 	amdgpu_dm_irq_resume_late(adev);
2752 
2753 	amdgpu_dm_smu_write_watermarks_table(adev);
2754 
2755 	return 0;
2756 }
2757 
2758 /**
2759  * DOC: DM Lifecycle
2760  *
2761  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2762  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2763  * the base driver's device list to be initialized and torn down accordingly.
2764  *
2765  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2766  */
2767 
2768 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2769 	.name = "dm",
2770 	.early_init = dm_early_init,
2771 	.late_init = dm_late_init,
2772 	.sw_init = dm_sw_init,
2773 	.sw_fini = dm_sw_fini,
2774 	.early_fini = amdgpu_dm_early_fini,
2775 	.hw_init = dm_hw_init,
2776 	.hw_fini = dm_hw_fini,
2777 	.suspend = dm_suspend,
2778 	.resume = dm_resume,
2779 	.is_idle = dm_is_idle,
2780 	.wait_for_idle = dm_wait_for_idle,
2781 	.check_soft_reset = dm_check_soft_reset,
2782 	.soft_reset = dm_soft_reset,
2783 	.set_clockgating_state = dm_set_clockgating_state,
2784 	.set_powergating_state = dm_set_powergating_state,
2785 };
2786 
2787 const struct amdgpu_ip_block_version dm_ip_block =
2788 {
2789 	.type = AMD_IP_BLOCK_TYPE_DCE,
2790 	.major = 1,
2791 	.minor = 0,
2792 	.rev = 0,
2793 	.funcs = &amdgpu_dm_funcs,
2794 };
2795 
2796 
2797 /**
2798  * DOC: atomic
2799  *
2800  * *WIP*
2801  */
2802 
2803 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2804 	.fb_create = amdgpu_display_user_framebuffer_create,
2805 	.get_format_info = amd_get_format_info,
2806 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2807 	.atomic_check = amdgpu_dm_atomic_check,
2808 	.atomic_commit = drm_atomic_helper_commit,
2809 };
2810 
2811 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2812 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2813 };
2814 
2815 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2816 {
2817 	u32 max_cll, min_cll, max, min, q, r;
2818 	struct amdgpu_dm_backlight_caps *caps;
2819 	struct amdgpu_display_manager *dm;
2820 	struct drm_connector *conn_base;
2821 	struct amdgpu_device *adev;
2822 	struct dc_link *link = NULL;
2823 	static const u8 pre_computed_values[] = {
2824 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2825 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2826 	int i;
2827 
2828 	if (!aconnector || !aconnector->dc_link)
2829 		return;
2830 
2831 	link = aconnector->dc_link;
2832 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2833 		return;
2834 
2835 	conn_base = &aconnector->base;
2836 	adev = drm_to_adev(conn_base->dev);
2837 	dm = &adev->dm;
2838 	for (i = 0; i < dm->num_of_edps; i++) {
2839 		if (link == dm->backlight_link[i])
2840 			break;
2841 	}
2842 	if (i >= dm->num_of_edps)
2843 		return;
2844 	caps = &dm->backlight_caps[i];
2845 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2846 	caps->aux_support = false;
2847 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2848 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2849 
2850 	if (caps->ext_caps->bits.oled == 1 /*||
2851 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2852 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2853 		caps->aux_support = true;
2854 
2855 	if (amdgpu_backlight == 0)
2856 		caps->aux_support = false;
2857 	else if (amdgpu_backlight == 1)
2858 		caps->aux_support = true;
2859 
2860 	/* From the specification (CTA-861-G), to calculate the maximum
2861 	 * luminance we need to use:
2862 	 *	Luminance = 50*2**(CV/32)
2863 	 * where CV is a one-byte value.
2864 	 * Calculating this expression would require floating point precision;
2865 	 * to avoid that complexity, we take advantage of the fact that CV is
2866 	 * divided by a constant. From Euclid's division algorithm, we know that
2867 	 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
2868 	 * luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2869 	 * need to pre-compute the value of 50*(2**(r/32)). The values were
2870 	 * pre-computed with the following Ruby line:
2871 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2872 	 * The results of the above expression can be verified against
2873 	 * pre_computed_values.
2874 	 */
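	/* Worked example (value purely illustrative): max_cll = 128 gives
	 * q = 128 >> 5 = 4 and r = 128 % 32 = 0, so
	 * max = (1 << 4) * pre_computed_values[0] = 16 * 50 = 800,
	 * matching 50*2**(128/32) = 800.
	 */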
2875 	q = max_cll >> 5;
2876 	r = max_cll % 32;
2877 	max = (1 << q) * pre_computed_values[r];
2878 
2879 	/* min luminance: maxLum * (CV/255)^2 / 100 */
2880 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2881 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2882 
2883 	caps->aux_max_input_signal = max;
2884 	caps->aux_min_input_signal = min;
2885 }
2886 
2887 void amdgpu_dm_update_connector_after_detect(
2888 		struct amdgpu_dm_connector *aconnector)
2889 {
2890 	struct drm_connector *connector = &aconnector->base;
2891 	struct drm_device *dev = connector->dev;
2892 	struct dc_sink *sink;
2893 
2894 	/* MST handled by drm_mst framework */
2895 	if (aconnector->mst_mgr.mst_state == true)
2896 		return;
2897 
2898 	sink = aconnector->dc_link->local_sink;
2899 	if (sink)
2900 		dc_sink_retain(sink);
2901 
2902 	/*
2903 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2904 	 * the connector sink is set to either fake or physical sink depending on link status.
2905 	 * Skip if already done during boot.
2906 	 */
2907 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2908 			&& aconnector->dc_em_sink) {
2909 
2910 		/*
2911 		 * For S3 resume with headless use em_sink to fake the stream
2912 		 * because on resume connector->sink is set to NULL
2913 		 */
2914 		mutex_lock(&dev->mode_config.mutex);
2915 
2916 		if (sink) {
2917 			if (aconnector->dc_sink) {
2918 				amdgpu_dm_update_freesync_caps(connector, NULL);
2919 				/*
2920 				 * retain and release below are used to bump up the refcount
2921 				 * for the sink because the link doesn't point to it anymore
2922 				 * after disconnect; otherwise, on the next crtc-to-connector
2923 				 * reshuffle by UMD we would get an unwanted dc_sink release
2924 				 */
2925 				dc_sink_release(aconnector->dc_sink);
2926 			}
2927 			aconnector->dc_sink = sink;
2928 			dc_sink_retain(aconnector->dc_sink);
2929 			amdgpu_dm_update_freesync_caps(connector,
2930 					aconnector->edid);
2931 		} else {
2932 			amdgpu_dm_update_freesync_caps(connector, NULL);
2933 			if (!aconnector->dc_sink) {
2934 				aconnector->dc_sink = aconnector->dc_em_sink;
2935 				dc_sink_retain(aconnector->dc_sink);
2936 			}
2937 		}
2938 
2939 		mutex_unlock(&dev->mode_config.mutex);
2940 
2941 		if (sink)
2942 			dc_sink_release(sink);
2943 		return;
2944 	}
2945 
2946 	/*
2947 	 * TODO: temporary guard while a proper fix is found.
2948 	 * If this sink is an MST sink, we should not do anything.
2949 	 */
2950 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2951 		dc_sink_release(sink);
2952 		return;
2953 	}
2954 
2955 	if (aconnector->dc_sink == sink) {
2956 		/*
2957 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2958 		 * Do nothing!!
2959 		 */
2960 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2961 				aconnector->connector_id);
2962 		if (sink)
2963 			dc_sink_release(sink);
2964 		return;
2965 	}
2966 
2967 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2968 		aconnector->connector_id, aconnector->dc_sink, sink);
2969 
2970 	mutex_lock(&dev->mode_config.mutex);
2971 
2972 	/*
2973 	 * 1. Update status of the drm connector
2974 	 * 2. Send an event and let userspace tell us what to do
2975 	 */
2976 	if (sink) {
2977 		/*
2978 		 * TODO: check if we still need the S3 mode update workaround.
2979 		 * If yes, put it here.
2980 		 */
2981 		if (aconnector->dc_sink) {
2982 			amdgpu_dm_update_freesync_caps(connector, NULL);
2983 			dc_sink_release(aconnector->dc_sink);
2984 		}
2985 
2986 		aconnector->dc_sink = sink;
2987 		dc_sink_retain(aconnector->dc_sink);
2988 		if (sink->dc_edid.length == 0) {
2989 			aconnector->edid = NULL;
2990 			if (aconnector->dc_link->aux_mode) {
2991 				drm_dp_cec_unset_edid(
2992 					&aconnector->dm_dp_aux.aux);
2993 			}
2994 		} else {
2995 			aconnector->edid =
2996 				(struct edid *)sink->dc_edid.raw_edid;
2997 
2998 			if (aconnector->dc_link->aux_mode)
2999 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3000 						    aconnector->edid);
3001 		}
3002 
3003 		drm_connector_update_edid_property(connector, aconnector->edid);
3004 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3005 		update_connector_ext_caps(aconnector);
3006 	} else {
3007 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3008 		amdgpu_dm_update_freesync_caps(connector, NULL);
3009 		drm_connector_update_edid_property(connector, NULL);
3010 		aconnector->num_modes = 0;
3011 		dc_sink_release(aconnector->dc_sink);
3012 		aconnector->dc_sink = NULL;
3013 		aconnector->edid = NULL;
3014 #ifdef CONFIG_DRM_AMD_DC_HDCP
3015 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3016 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3017 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3018 #endif
3019 	}
3020 
3021 	mutex_unlock(&dev->mode_config.mutex);
3022 
3023 	update_subconnector_property(aconnector);
3024 
3025 	if (sink)
3026 		dc_sink_release(sink);
3027 }
3028 
3029 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3030 {
3031 	struct drm_connector *connector = &aconnector->base;
3032 	struct drm_device *dev = connector->dev;
3033 	enum dc_connection_type new_connection_type = dc_connection_none;
3034 	struct amdgpu_device *adev = drm_to_adev(dev);
3035 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3036 	struct dm_crtc_state *dm_crtc_state = NULL;
3037 
3038 	if (adev->dm.disable_hpd_irq)
3039 		return;
3040 
3041 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3042 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3043 					dm_con_state->base.state,
3044 					dm_con_state->base.crtc));
3045 	/*
3046 	 * In case of failure, or for MST, there is no need to update the connector
3047 	 * status or notify the OS, since (in the MST case) MST does this in its own context.
3048 	 */
3049 	mutex_lock(&aconnector->hpd_lock);
3050 
3051 #ifdef CONFIG_DRM_AMD_DC_HDCP
3052 	if (adev->dm.hdcp_workqueue) {
3053 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3054 		dm_con_state->update_hdcp = true;
3055 	}
3056 #endif
3057 	if (aconnector->fake_enable)
3058 		aconnector->fake_enable = false;
3059 
3060 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3061 		DRM_ERROR("KMS: Failed to detect connector\n");
3062 
3063 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3064 		emulated_link_detect(aconnector->dc_link);
3065 
3066 		drm_modeset_lock_all(dev);
3067 		dm_restore_drm_connector_state(dev, connector);
3068 		drm_modeset_unlock_all(dev);
3069 
3070 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3071 			drm_kms_helper_connector_hotplug_event(connector);
3072 
3073 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3074 		if (new_connection_type == dc_connection_none &&
3075 		    aconnector->dc_link->type == dc_connection_none &&
3076 		    dm_crtc_state)
3077 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3078 
3079 		amdgpu_dm_update_connector_after_detect(aconnector);
3080 
3081 		drm_modeset_lock_all(dev);
3082 		dm_restore_drm_connector_state(dev, connector);
3083 		drm_modeset_unlock_all(dev);
3084 
3085 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3086 			drm_kms_helper_connector_hotplug_event(connector);
3087 	}
3088 	mutex_unlock(&aconnector->hpd_lock);
3089 
3090 }
3091 
3092 static void handle_hpd_irq(void *param)
3093 {
3094 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3095 
3096 	handle_hpd_irq_helper(aconnector);
3097 
3098 }
3099 
3100 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3101 {
3102 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3103 	uint8_t dret;
3104 	bool new_irq_handled = false;
3105 	int dpcd_addr;
3106 	int dpcd_bytes_to_read;
3107 
3108 	const int max_process_count = 30;
3109 	int process_count = 0;
3110 
3111 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3112 
3113 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3114 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3115 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3116 		dpcd_addr = DP_SINK_COUNT;
3117 	} else {
3118 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3119 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3120 		dpcd_addr = DP_SINK_COUNT_ESI;
3121 	}
3122 
3123 	dret = drm_dp_dpcd_read(
3124 		&aconnector->dm_dp_aux.aux,
3125 		dpcd_addr,
3126 		esi,
3127 		dpcd_bytes_to_read);
3128 
3129 	while (dret == dpcd_bytes_to_read &&
3130 		process_count < max_process_count) {
3131 		uint8_t retry;
3132 		dret = 0;
3133 
3134 		process_count++;
3135 
3136 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3137 		/* handle HPD short pulse irq */
3138 		if (aconnector->mst_mgr.mst_state)
3139 			drm_dp_mst_hpd_irq(
3140 				&aconnector->mst_mgr,
3141 				esi,
3142 				&new_irq_handled);
3143 
3144 		if (new_irq_handled) {
3145 			/* ACK at DPCD to notify downstream */
3146 			const int ack_dpcd_bytes_to_write =
3147 				dpcd_bytes_to_read - 1;
3148 
3149 			for (retry = 0; retry < 3; retry++) {
3150 				uint8_t wret;
3151 
3152 				wret = drm_dp_dpcd_write(
3153 					&aconnector->dm_dp_aux.aux,
3154 					dpcd_addr + 1,
3155 					&esi[1],
3156 					ack_dpcd_bytes_to_write);
3157 				if (wret == ack_dpcd_bytes_to_write)
3158 					break;
3159 			}
3160 
3161 			/* check if there is new irq to be handled */
3162 			dret = drm_dp_dpcd_read(
3163 				&aconnector->dm_dp_aux.aux,
3164 				dpcd_addr,
3165 				esi,
3166 				dpcd_bytes_to_read);
3167 
3168 			new_irq_handled = false;
3169 		} else {
3170 			break;
3171 		}
3172 	}
3173 
3174 	if (process_count == max_process_count)
3175 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3176 }
3177 
3178 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3179 							union hpd_irq_data hpd_irq_data)
3180 {
3181 	struct hpd_rx_irq_offload_work *offload_work =
3182 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3183 
3184 	if (!offload_work) {
3185 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3186 		return;
3187 	}
3188 
3189 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3190 	offload_work->data = hpd_irq_data;
3191 	offload_work->offload_wq = offload_wq;
3192 
3193 	queue_work(offload_wq->wq, &offload_work->work);
3194 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3195 }
3196 
3197 static void handle_hpd_rx_irq(void *param)
3198 {
3199 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3200 	struct drm_connector *connector = &aconnector->base;
3201 	struct drm_device *dev = connector->dev;
3202 	struct dc_link *dc_link = aconnector->dc_link;
3203 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3204 	bool result = false;
3205 	enum dc_connection_type new_connection_type = dc_connection_none;
3206 	struct amdgpu_device *adev = drm_to_adev(dev);
3207 	union hpd_irq_data hpd_irq_data;
3208 	bool link_loss = false;
3209 	bool has_left_work = false;
3210 	int idx = aconnector->base.index;
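	/* hpd_rx handling is offloaded per link; the queue for this link was
	 * associated with the connector index in register_hpd_handlers().
	 */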
3211 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3212 
3213 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3214 
3215 	if (adev->dm.disable_hpd_irq)
3216 		return;
3217 
3218 	/*
3219 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
3220 	 * conflict; once the i2c helper is implemented, this mutex should be
3221 	 * retired.
3222 	 */
3223 	mutex_lock(&aconnector->hpd_lock);
3224 
3225 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3226 						&link_loss, true, &has_left_work);
3227 
3228 	if (!has_left_work)
3229 		goto out;
3230 
3231 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3232 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3233 		goto out;
3234 	}
3235 
3236 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3237 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3238 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3239 			dm_handle_mst_sideband_msg(aconnector);
3240 			goto out;
3241 		}
3242 
3243 		if (link_loss) {
3244 			bool skip = false;
3245 
3246 			spin_lock(&offload_wq->offload_lock);
3247 			skip = offload_wq->is_handling_link_loss;
3248 
3249 			if (!skip)
3250 				offload_wq->is_handling_link_loss = true;
3251 
3252 			spin_unlock(&offload_wq->offload_lock);
3253 
3254 			if (!skip)
3255 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3256 
3257 			goto out;
3258 		}
3259 	}
3260 
3261 out:
3262 	if (result && !is_mst_root_connector) {
3263 		/* Downstream Port status changed. */
3264 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3265 			DRM_ERROR("KMS: Failed to detect connector\n");
3266 
3267 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3268 			emulated_link_detect(dc_link);
3269 
3270 			if (aconnector->fake_enable)
3271 				aconnector->fake_enable = false;
3272 
3273 			amdgpu_dm_update_connector_after_detect(aconnector);
3274 
3275 
3276 			drm_modeset_lock_all(dev);
3277 			dm_restore_drm_connector_state(dev, connector);
3278 			drm_modeset_unlock_all(dev);
3279 
3280 			drm_kms_helper_connector_hotplug_event(connector);
3281 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3282 
3283 			if (aconnector->fake_enable)
3284 				aconnector->fake_enable = false;
3285 
3286 			amdgpu_dm_update_connector_after_detect(aconnector);
3287 
3288 
3289 			drm_modeset_lock_all(dev);
3290 			dm_restore_drm_connector_state(dev, connector);
3291 			drm_modeset_unlock_all(dev);
3292 
3293 			drm_kms_helper_connector_hotplug_event(connector);
3294 		}
3295 	}
3296 #ifdef CONFIG_DRM_AMD_DC_HDCP
3297 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3298 		if (adev->dm.hdcp_workqueue)
3299 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3300 	}
3301 #endif
3302 
3303 	if (dc_link->type != dc_connection_mst_branch)
3304 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3305 
3306 	mutex_unlock(&aconnector->hpd_lock);
3307 }
3308 
3309 static void register_hpd_handlers(struct amdgpu_device *adev)
3310 {
3311 	struct drm_device *dev = adev_to_drm(adev);
3312 	struct drm_connector *connector;
3313 	struct amdgpu_dm_connector *aconnector;
3314 	const struct dc_link *dc_link;
3315 	struct dc_interrupt_params int_params = {0};
3316 
3317 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3318 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3319 
3320 	list_for_each_entry(connector,
3321 			&dev->mode_config.connector_list, head)	{
3322 
3323 		aconnector = to_amdgpu_dm_connector(connector);
3324 		dc_link = aconnector->dc_link;
3325 
3326 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3327 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3328 			int_params.irq_source = dc_link->irq_source_hpd;
3329 
3330 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3331 					handle_hpd_irq,
3332 					(void *) aconnector);
3333 		}
3334 
3335 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3336 
3337 			/* Also register for DP short pulse (hpd_rx). */
3338 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3339 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3340 
3341 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3342 					handle_hpd_rx_irq,
3343 					(void *) aconnector);
3344 
3345 			if (adev->dm.hpd_rx_offload_wq)
3346 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3347 					aconnector;
3348 		}
3349 	}
3350 }
3351 
3352 #if defined(CONFIG_DRM_AMD_DC_SI)
3353 /* Register IRQ sources and initialize IRQ callbacks */
3354 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3355 {
3356 	struct dc *dc = adev->dm.dc;
3357 	struct common_irq_params *c_irq_params;
3358 	struct dc_interrupt_params int_params = {0};
3359 	int r;
3360 	int i;
3361 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3362 
3363 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3364 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3365 
3366 	/*
3367 	 * Actions of amdgpu_irq_add_id():
3368 	 * 1. Register a set() function with base driver.
3369 	 *    Base driver will call set() function to enable/disable an
3370 	 *    interrupt in DC hardware.
3371 	 * 2. Register amdgpu_dm_irq_handler().
3372 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3373 	 *    coming from DC hardware.
3374 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3375 	 *    for acknowledging and handling. */
3376 
3377 	/* Use VBLANK interrupt */
3378 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3379 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3380 		if (r) {
3381 			DRM_ERROR("Failed to add crtc irq id!\n");
3382 			return r;
3383 		}
3384 
3385 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3386 		int_params.irq_source =
3387 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3388 
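		/*
		 * DC_IRQ_SOURCE_VBLANK1..VBLANK6 are assumed to be contiguous
		 * enum values here, so subtracting the first one turns the
		 * irq source into a 0-based index into the per-CRTC
		 * vblank_params array.
		 */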
3389 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3390 
3391 		c_irq_params->adev = adev;
3392 		c_irq_params->irq_src = int_params.irq_source;
3393 
3394 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3395 				dm_crtc_high_irq, c_irq_params);
3396 	}
3397 
3398 	/* Use GRPH_PFLIP interrupt */
3399 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3400 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3401 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3402 		if (r) {
3403 			DRM_ERROR("Failed to add page flip irq id!\n");
3404 			return r;
3405 		}
3406 
3407 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3408 		int_params.irq_source =
3409 			dc_interrupt_to_irq_source(dc, i, 0);
3410 
3411 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3412 
3413 		c_irq_params->adev = adev;
3414 		c_irq_params->irq_src = int_params.irq_source;
3415 
3416 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3417 				dm_pflip_high_irq, c_irq_params);
3418 
3419 	}
3420 
3421 	/* HPD */
3422 	r = amdgpu_irq_add_id(adev, client_id,
3423 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3424 	if (r) {
3425 		DRM_ERROR("Failed to add hpd irq id!\n");
3426 		return r;
3427 	}
3428 
3429 	register_hpd_handlers(adev);
3430 
3431 	return 0;
3432 }
3433 #endif
3434 
3435 /* Register IRQ sources and initialize IRQ callbacks */
3436 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3437 {
3438 	struct dc *dc = adev->dm.dc;
3439 	struct common_irq_params *c_irq_params;
3440 	struct dc_interrupt_params int_params = {0};
3441 	int r;
3442 	int i;
3443 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3444 
3445 	if (adev->family >= AMDGPU_FAMILY_AI)
3446 		client_id = SOC15_IH_CLIENTID_DCE;
3447 
3448 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3449 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3450 
3451 	/*
3452 	 * Actions of amdgpu_irq_add_id():
3453 	 * 1. Register a set() function with base driver.
3454 	 *    Base driver will call set() function to enable/disable an
3455 	 *    interrupt in DC hardware.
3456 	 * 2. Register amdgpu_dm_irq_handler().
3457 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3458 	 *    coming from DC hardware.
3459 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3460 	 *    for acknowledging and handling. */
3461 
3462 	/* Use VBLANK interrupt */
3463 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3464 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3465 		if (r) {
3466 			DRM_ERROR("Failed to add crtc irq id!\n");
3467 			return r;
3468 		}
3469 
3470 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3471 		int_params.irq_source =
3472 			dc_interrupt_to_irq_source(dc, i, 0);
3473 
3474 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3475 
3476 		c_irq_params->adev = adev;
3477 		c_irq_params->irq_src = int_params.irq_source;
3478 
3479 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3480 				dm_crtc_high_irq, c_irq_params);
3481 	}
3482 
3483 	/* Use VUPDATE interrupt */
3484 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3485 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3486 		if (r) {
3487 			DRM_ERROR("Failed to add vupdate irq id!\n");
3488 			return r;
3489 		}
3490 
3491 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3492 		int_params.irq_source =
3493 			dc_interrupt_to_irq_source(dc, i, 0);
3494 
3495 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3496 
3497 		c_irq_params->adev = adev;
3498 		c_irq_params->irq_src = int_params.irq_source;
3499 
3500 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3501 				dm_vupdate_high_irq, c_irq_params);
3502 	}
3503 
3504 	/* Use GRPH_PFLIP interrupt */
3505 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3506 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3507 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3508 		if (r) {
3509 			DRM_ERROR("Failed to add page flip irq id!\n");
3510 			return r;
3511 		}
3512 
3513 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3514 		int_params.irq_source =
3515 			dc_interrupt_to_irq_source(dc, i, 0);
3516 
3517 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3518 
3519 		c_irq_params->adev = adev;
3520 		c_irq_params->irq_src = int_params.irq_source;
3521 
3522 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3523 				dm_pflip_high_irq, c_irq_params);
3524 
3525 	}
3526 
3527 	/* HPD */
3528 	r = amdgpu_irq_add_id(adev, client_id,
3529 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3530 	if (r) {
3531 		DRM_ERROR("Failed to add hpd irq id!\n");
3532 		return r;
3533 	}
3534 
3535 	register_hpd_handlers(adev);
3536 
3537 	return 0;
3538 }
3539 
3540 /* Register IRQ sources and initialize IRQ callbacks */
3541 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3542 {
3543 	struct dc *dc = adev->dm.dc;
3544 	struct common_irq_params *c_irq_params;
3545 	struct dc_interrupt_params int_params = {0};
3546 	int r;
3547 	int i;
3548 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3549 	static const unsigned int vrtl_int_srcid[] = {
3550 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3551 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3552 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3553 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3554 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3555 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3556 	};
3557 #endif
3558 
3559 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3560 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3561 
3562 	/*
3563 	 * Actions of amdgpu_irq_add_id():
3564 	 * 1. Register a set() function with base driver.
3565 	 *    Base driver will call set() function to enable/disable an
3566 	 *    interrupt in DC hardware.
3567 	 * 2. Register amdgpu_dm_irq_handler().
3568 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3569 	 *    coming from DC hardware.
3570 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3571 	 *    for acknowledging and handling.
3572 	 */
3573 
3574 	/* Use VSTARTUP interrupt */
3575 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3576 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3577 			i++) {
3578 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3579 
3580 		if (r) {
3581 			DRM_ERROR("Failed to add crtc irq id!\n");
3582 			return r;
3583 		}
3584 
3585 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3586 		int_params.irq_source =
3587 			dc_interrupt_to_irq_source(dc, i, 0);
3588 
3589 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3590 
3591 		c_irq_params->adev = adev;
3592 		c_irq_params->irq_src = int_params.irq_source;
3593 
3594 		amdgpu_dm_irq_register_interrupt(
3595 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3596 	}
3597 
3598 	/* Use otg vertical line interrupt */
3599 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3600 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3601 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3602 				vrtl_int_srcid[i], &adev->vline0_irq);
3603 
3604 		if (r) {
3605 			DRM_ERROR("Failed to add vline0 irq id!\n");
3606 			return r;
3607 		}
3608 
3609 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3610 		int_params.irq_source =
3611 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3612 
3613 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3614 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3615 			break;
3616 		}
3617 
3618 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3619 					- DC_IRQ_SOURCE_DC1_VLINE0];
3620 
3621 		c_irq_params->adev = adev;
3622 		c_irq_params->irq_src = int_params.irq_source;
3623 
3624 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3625 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3626 	}
3627 #endif
3628 
3629 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3630 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3631 	 * to trigger at the end of each vblank, regardless of the lock state,
3632 	 * matching DCE behaviour.
3633 	 */
3634 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3635 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3636 	     i++) {
3637 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3638 
3639 		if (r) {
3640 			DRM_ERROR("Failed to add vupdate irq id!\n");
3641 			return r;
3642 		}
3643 
3644 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3645 		int_params.irq_source =
3646 			dc_interrupt_to_irq_source(dc, i, 0);
3647 
3648 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3649 
3650 		c_irq_params->adev = adev;
3651 		c_irq_params->irq_src = int_params.irq_source;
3652 
3653 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3654 				dm_vupdate_high_irq, c_irq_params);
3655 	}
3656 
3657 	/* Use GRPH_PFLIP interrupt */
3658 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3659 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3660 			i++) {
3661 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3662 		if (r) {
3663 			DRM_ERROR("Failed to add page flip irq id!\n");
3664 			return r;
3665 		}
3666 
3667 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3668 		int_params.irq_source =
3669 			dc_interrupt_to_irq_source(dc, i, 0);
3670 
3671 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3672 
3673 		c_irq_params->adev = adev;
3674 		c_irq_params->irq_src = int_params.irq_source;
3675 
3676 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3677 				dm_pflip_high_irq, c_irq_params);
3678 
3679 	}
3680 
3681 	/* HPD */
3682 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3683 			&adev->hpd_irq);
3684 	if (r) {
3685 		DRM_ERROR("Failed to add hpd irq id!\n");
3686 		return r;
3687 	}
3688 
3689 	register_hpd_handlers(adev);
3690 
3691 	return 0;
3692 }
3693 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3694 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3695 {
3696 	struct dc *dc = adev->dm.dc;
3697 	struct common_irq_params *c_irq_params;
3698 	struct dc_interrupt_params int_params = {0};
3699 	int r, i;
3700 
3701 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3702 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3703 
3704 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3705 			&adev->dmub_outbox_irq);
3706 	if (r) {
3707 		DRM_ERROR("Failed to add outbox irq id!\n");
3708 		return r;
3709 	}
3710 
3711 	if (dc->ctx->dmub_srv) {
3712 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3713 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3714 		int_params.irq_source =
3715 			dc_interrupt_to_irq_source(dc, i, 0);
3716 
3717 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3718 
3719 		c_irq_params->adev = adev;
3720 		c_irq_params->irq_src = int_params.irq_source;
3721 
3722 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3723 				dm_dmub_outbox1_low_irq, c_irq_params);
3724 	}
3725 
3726 	return 0;
3727 }
3728 
3729 /*
3730  * Acquires the lock for the atomic state object and returns
3731  * the new atomic state.
3732  *
3733  * This should only be called during atomic check.
3734  */
3735 int dm_atomic_get_state(struct drm_atomic_state *state,
3736 			struct dm_atomic_state **dm_state)
3737 {
3738 	struct drm_device *dev = state->dev;
3739 	struct amdgpu_device *adev = drm_to_adev(dev);
3740 	struct amdgpu_display_manager *dm = &adev->dm;
3741 	struct drm_private_state *priv_state;
3742 
3743 	if (*dm_state)
3744 		return 0;
3745 
3746 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3747 	if (IS_ERR(priv_state))
3748 		return PTR_ERR(priv_state);
3749 
3750 	*dm_state = to_dm_atomic_state(priv_state);
3751 
3752 	return 0;
3753 }
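
/*
 * Illustrative sketch only (not a verbatim caller): code in the atomic-check
 * path is expected to start with a NULL dm_state and let this helper resolve
 * it on first use, roughly like:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context now refers to the private DC state for this commit
 */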
3754 
3755 static struct dm_atomic_state *
3756 dm_atomic_get_new_state(struct drm_atomic_state *state)
3757 {
3758 	struct drm_device *dev = state->dev;
3759 	struct amdgpu_device *adev = drm_to_adev(dev);
3760 	struct amdgpu_display_manager *dm = &adev->dm;
3761 	struct drm_private_obj *obj;
3762 	struct drm_private_state *new_obj_state;
3763 	int i;
3764 
3765 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3766 		if (obj->funcs == dm->atomic_obj.funcs)
3767 			return to_dm_atomic_state(new_obj_state);
3768 	}
3769 
3770 	return NULL;
3771 }
3772 
3773 static struct drm_private_state *
3774 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3775 {
3776 	struct dm_atomic_state *old_state, *new_state;
3777 
3778 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3779 	if (!new_state)
3780 		return NULL;
3781 
3782 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3783 
3784 	old_state = to_dm_atomic_state(obj->state);
3785 
3786 	if (old_state && old_state->context)
3787 		new_state->context = dc_copy_state(old_state->context);
3788 
3789 	if (!new_state->context) {
3790 		kfree(new_state);
3791 		return NULL;
3792 	}
3793 
3794 	return &new_state->base;
3795 }
3796 
3797 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3798 				    struct drm_private_state *state)
3799 {
3800 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3801 
3802 	if (dm_state && dm_state->context)
3803 		dc_release_state(dm_state->context);
3804 
3805 	kfree(dm_state);
3806 }
3807 
3808 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3809 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3810 	.atomic_destroy_state = dm_atomic_destroy_state,
3811 };
3812 
3813 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3814 {
3815 	struct dm_atomic_state *state;
3816 	int r;
3817 
3818 	adev->mode_info.mode_config_initialized = true;
3819 
3820 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3821 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3822 
3823 	adev_to_drm(adev)->mode_config.max_width = 16384;
3824 	adev_to_drm(adev)->mode_config.max_height = 16384;
3825 
3826 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3827 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3828 	/* indicates support for immediate flip */
3829 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3830 
3831 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3832 
3833 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3834 	if (!state)
3835 		return -ENOMEM;
3836 
3837 	state->context = dc_create_state(adev->dm.dc);
3838 	if (!state->context) {
3839 		kfree(state);
3840 		return -ENOMEM;
3841 	}
3842 
3843 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3844 
3845 	drm_atomic_private_obj_init(adev_to_drm(adev),
3846 				    &adev->dm.atomic_obj,
3847 				    &state->base,
3848 				    &dm_atomic_state_funcs);
3849 
3850 	r = amdgpu_display_modeset_create_props(adev);
3851 	if (r) {
3852 		dc_release_state(state->context);
3853 		kfree(state);
3854 		return r;
3855 	}
3856 
3857 	r = amdgpu_dm_audio_init(adev);
3858 	if (r) {
3859 		dc_release_state(state->context);
3860 		kfree(state);
3861 		return r;
3862 	}
3863 
3864 	return 0;
3865 }
3866 
3867 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3868 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3869 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3870 
3871 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3872 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3873 
3874 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3875 					    int bl_idx)
3876 {
3877 #if defined(CONFIG_ACPI)
3878 	struct amdgpu_dm_backlight_caps caps;
3879 
3880 	memset(&caps, 0, sizeof(caps));
3881 
3882 	if (dm->backlight_caps[bl_idx].caps_valid)
3883 		return;
3884 
3885 	amdgpu_acpi_get_backlight_caps(&caps);
3886 	if (caps.caps_valid) {
3887 		dm->backlight_caps[bl_idx].caps_valid = true;
3888 		if (caps.aux_support)
3889 			return;
3890 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3891 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3892 	} else {
3893 		dm->backlight_caps[bl_idx].min_input_signal =
3894 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3895 		dm->backlight_caps[bl_idx].max_input_signal =
3896 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3897 	}
3898 #else
3899 	if (dm->backlight_caps[bl_idx].aux_support)
3900 		return;
3901 
3902 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3903 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3904 #endif
3905 }
3906 
3907 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3908 				unsigned *min, unsigned *max)
3909 {
3910 	if (!caps)
3911 		return 0;
3912 
3913 	if (caps->aux_support) {
3914 		// Firmware limits are in nits, DC API wants millinits.
3915 		*max = 1000 * caps->aux_max_input_signal;
3916 		*min = 1000 * caps->aux_min_input_signal;
3917 	} else {
3918 		// Firmware limits are 8-bit, PWM control is 16-bit.
3919 		*max = 0x101 * caps->max_input_signal;
3920 		*min = 0x101 * caps->min_input_signal;
3921 	}
3922 	return 1;
3923 }
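
/*
 * Worked example of the PWM branch above (illustrative only): with the
 * default caps of min_input_signal = 12 and max_input_signal = 255, the
 * 0x101 scaling gives min = 257 * 12 = 3084 and max = 257 * 255 = 65535,
 * i.e. the firmware's 8-bit limits are stretched over the full 16-bit
 * PWM range.
 */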
3924 
3925 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3926 					uint32_t brightness)
3927 {
3928 	unsigned min, max;
3929 
3930 	if (!get_brightness_range(caps, &min, &max))
3931 		return brightness;
3932 
3933 	// Rescale 0..255 to min..max
3934 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3935 				       AMDGPU_MAX_BL_LEVEL);
3936 }
3937 
3938 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3939 				      uint32_t brightness)
3940 {
3941 	unsigned min, max;
3942 
3943 	if (!get_brightness_range(caps, &min, &max))
3944 		return brightness;
3945 
3946 	if (brightness < min)
3947 		return 0;
3948 	// Rescale min..max to 0..255
3949 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3950 				 max - min);
3951 }
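
/*
 * Illustrative round trip with the numbers from the example above: a user
 * brightness of 128 maps to 3084 + (65535 - 3084) * 128 / 255 ~= 34433 in
 * hardware units, and converting that value back yields 128 again, so the
 * reported brightness survives the conversion up to rounding.
 */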
3952 
3953 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3954 					 int bl_idx,
3955 					 u32 user_brightness)
3956 {
3957 	struct amdgpu_dm_backlight_caps caps;
3958 	struct dc_link *link;
3959 	u32 brightness;
3960 	bool rc;
3961 
3962 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3963 	caps = dm->backlight_caps[bl_idx];
3964 
3965 	dm->brightness[bl_idx] = user_brightness;
3966 	/* update scratch register */
3967 	if (bl_idx == 0)
3968 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3969 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3970 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3971 
3972 	/* Change brightness based on AUX property */
3973 	if (caps.aux_support) {
3974 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3975 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3976 		if (!rc)
3977 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3978 	} else {
3979 		rc = dc_link_set_backlight_level(link, brightness, 0);
3980 		if (!rc)
3981 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3982 	}
3983 
3984 	if (rc)
3985 		dm->actual_brightness[bl_idx] = user_brightness;
3986 }
3987 
3988 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3989 {
3990 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3991 	int i;
3992 
3993 	for (i = 0; i < dm->num_of_edps; i++) {
3994 		if (bd == dm->backlight_dev[i])
3995 			break;
3996 	}
3997 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3998 		i = 0;
3999 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4000 
4001 	return 0;
4002 }
4003 
4004 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4005 					 int bl_idx)
4006 {
4007 	struct amdgpu_dm_backlight_caps caps;
4008 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4009 
4010 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4011 	caps = dm->backlight_caps[bl_idx];
4012 
4013 	if (caps.aux_support) {
4014 		u32 avg, peak;
4015 		bool rc;
4016 
4017 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4018 		if (!rc)
4019 			return dm->brightness[bl_idx];
4020 		return convert_brightness_to_user(&caps, avg);
4021 	} else {
4022 		int ret = dc_link_get_backlight_level(link);
4023 
4024 		if (ret == DC_ERROR_UNEXPECTED)
4025 			return dm->brightness[bl_idx];
4026 		return convert_brightness_to_user(&caps, ret);
4027 	}
4028 }
4029 
4030 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4031 {
4032 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4033 	int i;
4034 
4035 	for (i = 0; i < dm->num_of_edps; i++) {
4036 		if (bd == dm->backlight_dev[i])
4037 			break;
4038 	}
4039 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4040 		i = 0;
4041 	return amdgpu_dm_backlight_get_level(dm, i);
4042 }
4043 
4044 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4045 	.options = BL_CORE_SUSPENDRESUME,
4046 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4047 	.update_status	= amdgpu_dm_backlight_update_status,
4048 };
4049 
4050 static void
4051 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4052 {
4053 	char bl_name[16];
4054 	struct backlight_properties props = { 0 };
4055 
4056 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4057 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4058 
4059 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4060 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4061 	props.type = BACKLIGHT_RAW;
4062 
4063 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4064 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4065 
4066 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4067 								       adev_to_drm(dm->adev)->dev,
4068 								       dm,
4069 								       &amdgpu_dm_backlight_ops,
4070 								       &props);
4071 
4072 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4073 		DRM_ERROR("DM: Backlight registration failed!\n");
4074 	else
4075 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4076 }
4077 #endif
4078 
4079 static int initialize_plane(struct amdgpu_display_manager *dm,
4080 			    struct amdgpu_mode_info *mode_info, int plane_id,
4081 			    enum drm_plane_type plane_type,
4082 			    const struct dc_plane_cap *plane_cap)
4083 {
4084 	struct drm_plane *plane;
4085 	unsigned long possible_crtcs;
4086 	int ret = 0;
4087 
4088 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4089 	if (!plane) {
4090 		DRM_ERROR("KMS: Failed to allocate plane\n");
4091 		return -ENOMEM;
4092 	}
4093 	plane->type = plane_type;
4094 
4095 	/*
4096 	 * HACK: IGT tests expect that the primary plane for a CRTC
4097 	 * can only have one possible CRTC. Only expose support for
4098 	 * any CRTC if they're not going to be used as a primary plane
4099 	 * any CRTC on planes that are not going to be used as a primary
4100 	 * plane for a CRTC - like overlay or underlay planes.
4101 	possible_crtcs = 1 << plane_id;
4102 	if (plane_id >= dm->dc->caps.max_streams)
4103 		possible_crtcs = 0xff;
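	/*
	 * For example (illustrative): primary plane 0 advertises
	 * possible_crtcs = 0x1 (CRTC 0 only) and plane 1 advertises 0x2,
	 * while overlay planes created past max_streams advertise 0xff,
	 * i.e. any CRTC.
	 */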
4104 
4105 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4106 
4107 	if (ret) {
4108 		DRM_ERROR("KMS: Failed to initialize plane\n");
4109 		kfree(plane);
4110 		return ret;
4111 	}
4112 
4113 	if (mode_info)
4114 		mode_info->planes[plane_id] = plane;
4115 
4116 	return ret;
4117 }
4118 
4119 
4120 static void register_backlight_device(struct amdgpu_display_manager *dm,
4121 				      struct dc_link *link)
4122 {
4123 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4124 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4125 
4126 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4127 	    link->type != dc_connection_none) {
4128 		/*
4129 		 * Even if registration failed, we should continue with
4130 		 * DM initialization because not having a backlight control
4131 		 * is better than a black screen.
4132 		 */
4133 		if (!dm->backlight_dev[dm->num_of_edps])
4134 			amdgpu_dm_register_backlight_device(dm);
4135 
4136 		if (dm->backlight_dev[dm->num_of_edps]) {
4137 			dm->backlight_link[dm->num_of_edps] = link;
4138 			dm->num_of_edps++;
4139 		}
4140 	}
4141 #endif
4142 }
4143 
4144 
4145 /*
4146  * In this architecture, the association
4147  * connector -> encoder -> crtc
4148  * is not really required. The crtc and connector will hold the
4149  * display_index as an abstraction to use with the DAL component.
4150  *
4151  * Returns 0 on success
4152  */
4153 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4154 {
4155 	struct amdgpu_display_manager *dm = &adev->dm;
4156 	int32_t i;
4157 	struct amdgpu_dm_connector *aconnector = NULL;
4158 	struct amdgpu_encoder *aencoder = NULL;
4159 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4160 	uint32_t link_cnt;
4161 	int32_t primary_planes;
4162 	enum dc_connection_type new_connection_type = dc_connection_none;
4163 	const struct dc_plane_cap *plane;
4164 	bool psr_feature_enabled = false;
4165 
4166 	dm->display_indexes_num = dm->dc->caps.max_streams;
4167 	/* Update the actually used number of CRTCs */
4168 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4169 
4170 	link_cnt = dm->dc->caps.max_links;
4171 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4172 		DRM_ERROR("DM: Failed to initialize mode config\n");
4173 		return -EINVAL;
4174 	}
4175 
4176 	/* There is one primary plane per CRTC */
4177 	primary_planes = dm->dc->caps.max_streams;
4178 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4179 
4180 	/*
4181 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4182 	 * Order is reversed to match iteration order in atomic check.
4183 	 */
4184 	for (i = (primary_planes - 1); i >= 0; i--) {
4185 		plane = &dm->dc->caps.planes[i];
4186 
4187 		if (initialize_plane(dm, mode_info, i,
4188 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4189 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4190 			goto fail;
4191 		}
4192 	}
4193 
4194 	/*
4195 	 * Initialize overlay planes, index starting after primary planes.
4196 	 * These planes have a higher DRM index than the primary planes since
4197 	 * they should be considered as having a higher z-order.
4198 	 * Order is reversed to match iteration order in atomic check.
4199 	 *
4200 	 * Only support DCN for now, and only expose one so we don't encourage
4201 	 * userspace to use up all the pipes.
4202 	 */
4203 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4204 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4205 
4206 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4207 			continue;
4208 
4209 		if (!plane->blends_with_above || !plane->blends_with_below)
4210 			continue;
4211 
4212 		if (!plane->pixel_format_support.argb8888)
4213 			continue;
4214 
4215 		if (initialize_plane(dm, NULL, primary_planes + i,
4216 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4217 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4218 			goto fail;
4219 		}
4220 
4221 		/* Only create one overlay plane. */
4222 		break;
4223 	}
4224 
4225 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4226 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4227 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4228 			goto fail;
4229 		}
4230 
4231 	/* Use Outbox interrupt */
4232 	switch (adev->ip_versions[DCE_HWIP][0]) {
4233 	case IP_VERSION(3, 0, 0):
4234 	case IP_VERSION(3, 1, 2):
4235 	case IP_VERSION(3, 1, 3):
4236 	case IP_VERSION(3, 1, 5):
4237 	case IP_VERSION(3, 1, 6):
4238 	case IP_VERSION(2, 1, 0):
4239 		if (register_outbox_irq_handlers(dm->adev)) {
4240 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4241 			goto fail;
4242 		}
4243 		break;
4244 	default:
4245 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4246 			      adev->ip_versions[DCE_HWIP][0]);
4247 	}
4248 
4249 	/* Determine whether to enable PSR support by default. */
4250 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4251 		switch (adev->ip_versions[DCE_HWIP][0]) {
4252 		case IP_VERSION(3, 1, 2):
4253 		case IP_VERSION(3, 1, 3):
4254 		case IP_VERSION(3, 1, 5):
4255 		case IP_VERSION(3, 1, 6):
4256 			psr_feature_enabled = true;
4257 			break;
4258 		default:
4259 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4260 			break;
4261 		}
4262 	}
4263 
4264 	/* Disable vblank IRQs aggressively for power-saving. */
4265 	adev_to_drm(adev)->vblank_disable_immediate = true;
4266 
4267 	/* loops over all connectors on the board */
4268 	for (i = 0; i < link_cnt; i++) {
4269 		struct dc_link *link = NULL;
4270 
4271 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4272 			DRM_ERROR(
4273 				"KMS: Cannot support more than %d display indexes\n",
4274 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4275 			continue;
4276 		}
4277 
4278 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4279 		if (!aconnector)
4280 			goto fail;
4281 
4282 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4283 		if (!aencoder)
4284 			goto fail;
4285 
4286 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4287 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4288 			goto fail;
4289 		}
4290 
4291 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4292 			DRM_ERROR("KMS: Failed to initialize connector\n");
4293 			goto fail;
4294 		}
4295 
4296 		link = dc_get_link_at_index(dm->dc, i);
4297 
4298 		if (!dc_link_detect_sink(link, &new_connection_type))
4299 			DRM_ERROR("KMS: Failed to detect connector\n");
4300 
4301 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4302 			emulated_link_detect(link);
4303 			amdgpu_dm_update_connector_after_detect(aconnector);
4304 
4305 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4306 			amdgpu_dm_update_connector_after_detect(aconnector);
4307 			register_backlight_device(dm, link);
4308 			if (dm->num_of_edps)
4309 				update_connector_ext_caps(aconnector);
4310 			if (psr_feature_enabled)
4311 				amdgpu_dm_set_psr_caps(link);
4312 
4313 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4314 			 * PSR is also supported.
4315 			 */
4316 			if (link->psr_settings.psr_feature_enabled)
4317 				adev_to_drm(adev)->vblank_disable_immediate = false;
4318 		}
4319 
4320 
4321 	}
4322 
4323 	/* Software is initialized. Now we can register interrupt handlers. */
4324 	switch (adev->asic_type) {
4325 #if defined(CONFIG_DRM_AMD_DC_SI)
4326 	case CHIP_TAHITI:
4327 	case CHIP_PITCAIRN:
4328 	case CHIP_VERDE:
4329 	case CHIP_OLAND:
4330 		if (dce60_register_irq_handlers(dm->adev)) {
4331 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4332 			goto fail;
4333 		}
4334 		break;
4335 #endif
4336 	case CHIP_BONAIRE:
4337 	case CHIP_HAWAII:
4338 	case CHIP_KAVERI:
4339 	case CHIP_KABINI:
4340 	case CHIP_MULLINS:
4341 	case CHIP_TONGA:
4342 	case CHIP_FIJI:
4343 	case CHIP_CARRIZO:
4344 	case CHIP_STONEY:
4345 	case CHIP_POLARIS11:
4346 	case CHIP_POLARIS10:
4347 	case CHIP_POLARIS12:
4348 	case CHIP_VEGAM:
4349 	case CHIP_VEGA10:
4350 	case CHIP_VEGA12:
4351 	case CHIP_VEGA20:
4352 		if (dce110_register_irq_handlers(dm->adev)) {
4353 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4354 			goto fail;
4355 		}
4356 		break;
4357 	default:
4358 		switch (adev->ip_versions[DCE_HWIP][0]) {
4359 		case IP_VERSION(1, 0, 0):
4360 		case IP_VERSION(1, 0, 1):
4361 		case IP_VERSION(2, 0, 2):
4362 		case IP_VERSION(2, 0, 3):
4363 		case IP_VERSION(2, 0, 0):
4364 		case IP_VERSION(2, 1, 0):
4365 		case IP_VERSION(3, 0, 0):
4366 		case IP_VERSION(3, 0, 2):
4367 		case IP_VERSION(3, 0, 3):
4368 		case IP_VERSION(3, 0, 1):
4369 		case IP_VERSION(3, 1, 2):
4370 		case IP_VERSION(3, 1, 3):
4371 		case IP_VERSION(3, 1, 5):
4372 		case IP_VERSION(3, 1, 6):
4373 			if (dcn10_register_irq_handlers(dm->adev)) {
4374 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4375 				goto fail;
4376 			}
4377 			break;
4378 		default:
4379 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4380 					adev->ip_versions[DCE_HWIP][0]);
4381 			goto fail;
4382 		}
4383 		break;
4384 	}
4385 
4386 	return 0;
4387 fail:
4388 	kfree(aencoder);
4389 	kfree(aconnector);
4390 
4391 	return -EINVAL;
4392 }
4393 
4394 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4395 {
4396 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4397 	return;
4398 }
4399 
4400 /******************************************************************************
4401  * amdgpu_display_funcs functions
4402  *****************************************************************************/
4403 
4404 /*
4405  * dm_bandwidth_update - program display watermarks
4406  *
4407  * @adev: amdgpu_device pointer
4408  *
4409  * Calculate and program the display watermarks and line buffer allocation.
4410  */
4411 static void dm_bandwidth_update(struct amdgpu_device *adev)
4412 {
4413 	/* TODO: implement later */
4414 }
4415 
4416 static const struct amdgpu_display_funcs dm_display_funcs = {
4417 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4418 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4419 	.backlight_set_level = NULL, /* never called for DC */
4420 	.backlight_get_level = NULL, /* never called for DC */
4421 	.hpd_sense = NULL,/* called unconditionally */
4422 	.hpd_set_polarity = NULL, /* called unconditionally */
4423 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4424 	.page_flip_get_scanoutpos =
4425 		dm_crtc_get_scanoutpos,/* called unconditionally */
4426 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4427 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4428 };
4429 
4430 #if defined(CONFIG_DEBUG_KERNEL_DC)
4431 
4432 static ssize_t s3_debug_store(struct device *device,
4433 			      struct device_attribute *attr,
4434 			      const char *buf,
4435 			      size_t count)
4436 {
4437 	int ret;
4438 	int s3_state;
4439 	struct drm_device *drm_dev = dev_get_drvdata(device);
4440 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4441 
4442 	ret = kstrtoint(buf, 0, &s3_state);
4443 
4444 	if (ret == 0) {
4445 		if (s3_state) {
4446 			dm_resume(adev);
4447 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4448 		} else
4449 			dm_suspend(adev);
4450 	}
4451 
4452 	return ret == 0 ? count : 0;
4453 }
4454 
4455 DEVICE_ATTR_WO(s3_debug);
4456 
4457 #endif
4458 
4459 static int dm_early_init(void *handle)
4460 {
4461 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4462 
4463 	switch (adev->asic_type) {
4464 #if defined(CONFIG_DRM_AMD_DC_SI)
4465 	case CHIP_TAHITI:
4466 	case CHIP_PITCAIRN:
4467 	case CHIP_VERDE:
4468 		adev->mode_info.num_crtc = 6;
4469 		adev->mode_info.num_hpd = 6;
4470 		adev->mode_info.num_dig = 6;
4471 		break;
4472 	case CHIP_OLAND:
4473 		adev->mode_info.num_crtc = 2;
4474 		adev->mode_info.num_hpd = 2;
4475 		adev->mode_info.num_dig = 2;
4476 		break;
4477 #endif
4478 	case CHIP_BONAIRE:
4479 	case CHIP_HAWAII:
4480 		adev->mode_info.num_crtc = 6;
4481 		adev->mode_info.num_hpd = 6;
4482 		adev->mode_info.num_dig = 6;
4483 		break;
4484 	case CHIP_KAVERI:
4485 		adev->mode_info.num_crtc = 4;
4486 		adev->mode_info.num_hpd = 6;
4487 		adev->mode_info.num_dig = 7;
4488 		break;
4489 	case CHIP_KABINI:
4490 	case CHIP_MULLINS:
4491 		adev->mode_info.num_crtc = 2;
4492 		adev->mode_info.num_hpd = 6;
4493 		adev->mode_info.num_dig = 6;
4494 		break;
4495 	case CHIP_FIJI:
4496 	case CHIP_TONGA:
4497 		adev->mode_info.num_crtc = 6;
4498 		adev->mode_info.num_hpd = 6;
4499 		adev->mode_info.num_dig = 7;
4500 		break;
4501 	case CHIP_CARRIZO:
4502 		adev->mode_info.num_crtc = 3;
4503 		adev->mode_info.num_hpd = 6;
4504 		adev->mode_info.num_dig = 9;
4505 		break;
4506 	case CHIP_STONEY:
4507 		adev->mode_info.num_crtc = 2;
4508 		adev->mode_info.num_hpd = 6;
4509 		adev->mode_info.num_dig = 9;
4510 		break;
4511 	case CHIP_POLARIS11:
4512 	case CHIP_POLARIS12:
4513 		adev->mode_info.num_crtc = 5;
4514 		adev->mode_info.num_hpd = 5;
4515 		adev->mode_info.num_dig = 5;
4516 		break;
4517 	case CHIP_POLARIS10:
4518 	case CHIP_VEGAM:
4519 		adev->mode_info.num_crtc = 6;
4520 		adev->mode_info.num_hpd = 6;
4521 		adev->mode_info.num_dig = 6;
4522 		break;
4523 	case CHIP_VEGA10:
4524 	case CHIP_VEGA12:
4525 	case CHIP_VEGA20:
4526 		adev->mode_info.num_crtc = 6;
4527 		adev->mode_info.num_hpd = 6;
4528 		adev->mode_info.num_dig = 6;
4529 		break;
4530 	default:
4531 
4532 		switch (adev->ip_versions[DCE_HWIP][0]) {
4533 		case IP_VERSION(2, 0, 2):
4534 		case IP_VERSION(3, 0, 0):
4535 			adev->mode_info.num_crtc = 6;
4536 			adev->mode_info.num_hpd = 6;
4537 			adev->mode_info.num_dig = 6;
4538 			break;
4539 		case IP_VERSION(2, 0, 0):
4540 		case IP_VERSION(3, 0, 2):
4541 			adev->mode_info.num_crtc = 5;
4542 			adev->mode_info.num_hpd = 5;
4543 			adev->mode_info.num_dig = 5;
4544 			break;
4545 		case IP_VERSION(2, 0, 3):
4546 		case IP_VERSION(3, 0, 3):
4547 			adev->mode_info.num_crtc = 2;
4548 			adev->mode_info.num_hpd = 2;
4549 			adev->mode_info.num_dig = 2;
4550 			break;
4551 		case IP_VERSION(1, 0, 0):
4552 		case IP_VERSION(1, 0, 1):
4553 		case IP_VERSION(3, 0, 1):
4554 		case IP_VERSION(2, 1, 0):
4555 		case IP_VERSION(3, 1, 2):
4556 		case IP_VERSION(3, 1, 3):
4557 		case IP_VERSION(3, 1, 5):
4558 		case IP_VERSION(3, 1, 6):
4559 			adev->mode_info.num_crtc = 4;
4560 			adev->mode_info.num_hpd = 4;
4561 			adev->mode_info.num_dig = 4;
4562 			break;
4563 		default:
4564 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4565 					adev->ip_versions[DCE_HWIP][0]);
4566 			return -EINVAL;
4567 		}
4568 		break;
4569 	}
4570 
4571 	amdgpu_dm_set_irq_funcs(adev);
4572 
4573 	if (adev->mode_info.funcs == NULL)
4574 		adev->mode_info.funcs = &dm_display_funcs;
4575 
4576 	/*
4577 	 * Note: Do NOT change adev->audio_endpt_rreg and
4578 	 * adev->audio_endpt_wreg because they are initialised in
4579 	 * amdgpu_device_init()
4580 	 */
4581 #if defined(CONFIG_DEBUG_KERNEL_DC)
4582 	device_create_file(
4583 		adev_to_drm(adev)->dev,
4584 		&dev_attr_s3_debug);
4585 #endif
4586 
4587 	return 0;
4588 }
4589 
4590 static bool modeset_required(struct drm_crtc_state *crtc_state,
4591 			     struct dc_stream_state *new_stream,
4592 			     struct dc_stream_state *old_stream)
4593 {
4594 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4595 }
4596 
4597 static bool modereset_required(struct drm_crtc_state *crtc_state)
4598 {
4599 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4600 }
4601 
4602 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4603 {
4604 	drm_encoder_cleanup(encoder);
4605 	kfree(encoder);
4606 }
4607 
4608 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4609 	.destroy = amdgpu_dm_encoder_destroy,
4610 };
4611 
4612 
4613 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4614 					 struct drm_framebuffer *fb,
4615 					 int *min_downscale, int *max_upscale)
4616 {
4617 	struct amdgpu_device *adev = drm_to_adev(dev);
4618 	struct dc *dc = adev->dm.dc;
4619 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4620 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4621 
4622 	switch (fb->format->format) {
4623 	case DRM_FORMAT_P010:
4624 	case DRM_FORMAT_NV12:
4625 	case DRM_FORMAT_NV21:
4626 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4627 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4628 		break;
4629 
4630 	case DRM_FORMAT_XRGB16161616F:
4631 	case DRM_FORMAT_ARGB16161616F:
4632 	case DRM_FORMAT_XBGR16161616F:
4633 	case DRM_FORMAT_ABGR16161616F:
4634 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4635 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4636 		break;
4637 
4638 	default:
4639 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4640 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4641 		break;
4642 	}
4643 
4644 	/*
4645 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
4646 	 * a scaling factor of 1.0 == 1000 units.
4647 	 */
4648 	if (*max_upscale == 1)
4649 		*max_upscale = 1000;
4650 
4651 	if (*min_downscale == 1)
4652 		*min_downscale = 1000;
4653 }
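
/*
 * In these units (illustrative reading of the caps): 1000 == 1.0, so a
 * min_downscale of 250 allows shrinking a plane to 25% of its source size
 * and a max_upscale of 16000 allows blowing it up to 16x, while 1000 in
 * both means the plane has to be shown at its native size.
 */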
4654 
4655 
4656 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4657 				const struct drm_plane_state *state,
4658 				struct dc_scaling_info *scaling_info)
4659 {
4660 	int scale_w, scale_h, min_downscale, max_upscale;
4661 
4662 	memset(scaling_info, 0, sizeof(*scaling_info));
4663 
4664 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4665 	scaling_info->src_rect.x = state->src_x >> 16;
4666 	scaling_info->src_rect.y = state->src_y >> 16;
4667 
4668 	/*
4669 	 * For reasons we don't (yet) fully understand a non-zero
4670 	 * src_y coordinate into an NV12 buffer can cause a
4671 	 * system hang on DCN1x.
4672 	 * To avoid hangs (and maybe to be overly cautious),
4673 	 * let's reject both non-zero src_x and src_y.
4674 	 *
4675 	 * We currently know of only one use-case to reproduce a
4676 	 * scenario with non-zero src_x and src_y for NV12, which
4677 	 * is to gesture the YouTube Android app into full screen
4678 	 * on ChromeOS.
4679 	 */
4680 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4681 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4682 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4683 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4684 		return -EINVAL;
4685 
4686 	scaling_info->src_rect.width = state->src_w >> 16;
4687 	if (scaling_info->src_rect.width == 0)
4688 		return -EINVAL;
4689 
4690 	scaling_info->src_rect.height = state->src_h >> 16;
4691 	if (scaling_info->src_rect.height == 0)
4692 		return -EINVAL;
4693 
4694 	scaling_info->dst_rect.x = state->crtc_x;
4695 	scaling_info->dst_rect.y = state->crtc_y;
4696 
4697 	if (state->crtc_w == 0)
4698 		return -EINVAL;
4699 
4700 	scaling_info->dst_rect.width = state->crtc_w;
4701 
4702 	if (state->crtc_h == 0)
4703 		return -EINVAL;
4704 
4705 	scaling_info->dst_rect.height = state->crtc_h;
4706 
4707 	/* DRM doesn't specify clipping on destination output. */
4708 	scaling_info->clip_rect = scaling_info->dst_rect;
4709 
4710 	/* Validate scaling per-format with DC plane caps */
4711 	if (state->plane && state->plane->dev && state->fb) {
4712 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4713 					     &min_downscale, &max_upscale);
4714 	} else {
4715 		min_downscale = 250;
4716 		max_upscale = 16000;
4717 	}
4718 
4719 	scale_w = scaling_info->dst_rect.width * 1000 /
4720 		  scaling_info->src_rect.width;
4721 
4722 	if (scale_w < min_downscale || scale_w > max_upscale)
4723 		return -EINVAL;
4724 
4725 	scale_h = scaling_info->dst_rect.height * 1000 /
4726 		  scaling_info->src_rect.height;
4727 
4728 	if (scale_h < min_downscale || scale_h > max_upscale)
4729 		return -EINVAL;
4730 
4731 	/*
4732 	 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4733 	 * assume reasonable defaults based on the format.
4734 	 */
4735 
4736 	return 0;
4737 }
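
/*
 * Worked example for the ratio check above (illustrative only): mapping a
 * 1920-wide source rect onto a 960-wide destination gives
 * scale_w = 960 * 1000 / 1920 = 500, i.e. a 0.5x downscale, which is
 * accepted as long as the plane's min_downscale cap is 500 or lower
 * (e.g. the 250 fallback used when no framebuffer is attached).
 */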
4738 
4739 static void
4740 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4741 				 uint64_t tiling_flags)
4742 {
4743 	/* Fill GFX8 params */
4744 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4745 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4746 
4747 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4748 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4749 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4750 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4751 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4752 
4753 		/* XXX fix me for VI */
4754 		tiling_info->gfx8.num_banks = num_banks;
4755 		tiling_info->gfx8.array_mode =
4756 				DC_ARRAY_2D_TILED_THIN1;
4757 		tiling_info->gfx8.tile_split = tile_split;
4758 		tiling_info->gfx8.bank_width = bankw;
4759 		tiling_info->gfx8.bank_height = bankh;
4760 		tiling_info->gfx8.tile_aspect = mtaspect;
4761 		tiling_info->gfx8.tile_mode =
4762 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4763 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4764 			== DC_ARRAY_1D_TILED_THIN1) {
4765 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4766 	}
4767 
4768 	tiling_info->gfx8.pipe_config =
4769 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4770 }
4771 
4772 static void
4773 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4774 				  union dc_tiling_info *tiling_info)
4775 {
4776 	tiling_info->gfx9.num_pipes =
4777 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4778 	tiling_info->gfx9.num_banks =
4779 		adev->gfx.config.gb_addr_config_fields.num_banks;
4780 	tiling_info->gfx9.pipe_interleave =
4781 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4782 	tiling_info->gfx9.num_shader_engines =
4783 		adev->gfx.config.gb_addr_config_fields.num_se;
4784 	tiling_info->gfx9.max_compressed_frags =
4785 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4786 	tiling_info->gfx9.num_rb_per_se =
4787 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4788 	tiling_info->gfx9.shaderEnable = 1;
4789 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4790 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4791 }
4792 
4793 static int
4794 validate_dcc(struct amdgpu_device *adev,
4795 	     const enum surface_pixel_format format,
4796 	     const enum dc_rotation_angle rotation,
4797 	     const union dc_tiling_info *tiling_info,
4798 	     const struct dc_plane_dcc_param *dcc,
4799 	     const struct dc_plane_address *address,
4800 	     const struct plane_size *plane_size)
4801 {
4802 	struct dc *dc = adev->dm.dc;
4803 	struct dc_dcc_surface_param input;
4804 	struct dc_surface_dcc_cap output;
4805 
4806 	memset(&input, 0, sizeof(input));
4807 	memset(&output, 0, sizeof(output));
4808 
4809 	if (!dcc->enable)
4810 		return 0;
4811 
4812 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4813 	    !dc->cap_funcs.get_dcc_compression_cap)
4814 		return -EINVAL;
4815 
4816 	input.format = format;
4817 	input.surface_size.width = plane_size->surface_size.width;
4818 	input.surface_size.height = plane_size->surface_size.height;
4819 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4820 
4821 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4822 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4823 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4824 		input.scan = SCAN_DIRECTION_VERTICAL;
4825 
4826 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4827 		return -EINVAL;
4828 
4829 	if (!output.capable)
4830 		return -EINVAL;
4831 
4832 	if (dcc->independent_64b_blks == 0 &&
4833 	    output.grph.rgb.independent_64b_blks != 0)
4834 		return -EINVAL;
4835 
4836 	return 0;
4837 }
4838 
4839 static bool
4840 modifier_has_dcc(uint64_t modifier)
4841 {
4842 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4843 }
4844 
4845 static unsigned
4846 modifier_gfx9_swizzle_mode(uint64_t modifier)
4847 {
4848 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4849 		return 0;
4850 
4851 	return AMD_FMT_MOD_GET(TILE, modifier);
4852 }
4853 
4854 static const struct drm_format_info *
4855 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4856 {
4857 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4858 }
4859 
4860 static void
4861 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4862 				    union dc_tiling_info *tiling_info,
4863 				    uint64_t modifier)
4864 {
4865 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4866 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4867 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4868 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4869 
4870 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4871 
4872 	if (!IS_AMD_FMT_MOD(modifier))
4873 		return;
4874 
4875 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4876 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4877 
4878 	if (adev->family >= AMDGPU_FAMILY_NV) {
4879 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4880 	} else {
4881 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4882 
4883 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4884 	}
4885 }
4886 
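/*
 * The low two bits of a GFX9+ swizzle mode are assumed here to select the
 * micro-tile class (the _Z/_S/_D/_R swizzle variants), which is what
 * dm_plane_format_mod_supported() extracts below with "& 3".
 */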
4887 enum dm_micro_swizzle {
4888 	MICRO_SWIZZLE_Z = 0,
4889 	MICRO_SWIZZLE_S = 1,
4890 	MICRO_SWIZZLE_D = 2,
4891 	MICRO_SWIZZLE_R = 3
4892 };
4893 
4894 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4895 					  uint32_t format,
4896 					  uint64_t modifier)
4897 {
4898 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4899 	const struct drm_format_info *info = drm_format_info(format);
4900 	int i;
4901 
4902 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4903 
4904 	if (!info)
4905 		return false;
4906 
4907 	/*
4908 	 * We always have to allow these modifiers:
4909 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4910 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4911 	 */
4912 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4913 	    modifier == DRM_FORMAT_MOD_INVALID) {
4914 		return true;
4915 	}
4916 
4917 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4918 	for (i = 0; i < plane->modifier_count; i++) {
4919 		if (modifier == plane->modifiers[i])
4920 			break;
4921 	}
4922 	if (i == plane->modifier_count)
4923 		return false;
4924 
4925 	/*
4926 	 * For D swizzle the canonical modifier depends on the bpp, so check
4927 	 * it here.
4928 	 */
4929 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4930 	    adev->family >= AMDGPU_FAMILY_NV) {
4931 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4932 			return false;
4933 	}
4934 
4935 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4936 	    info->cpp[0] < 8)
4937 		return false;
4938 
4939 	if (modifier_has_dcc(modifier)) {
4940 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4941 		if (info->cpp[0] != 4)
4942 			return false;
4943 		/* We support multi-planar formats, but not when combined with
4944 		 * additional DCC metadata planes. */
4945 		if (info->num_planes > 1)
4946 			return false;
4947 	}
4948 
4949 	return true;
4950 }
4951 
4952 static void
4953 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4954 {
4955 	if (!*mods)
4956 		return;
4957 
4958 	if (*cap - *size < 1) {
4959 		uint64_t new_cap = *cap * 2;
4960 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4961 
4962 		if (!new_mods) {
4963 			kfree(*mods);
4964 			*mods = NULL;
4965 			return;
4966 		}
4967 
4968 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4969 		kfree(*mods);
4970 		*mods = new_mods;
4971 		*cap = new_cap;
4972 	}
4973 
4974 	(*mods)[*size] = mod;
4975 	*size += 1;
4976 }
4977 
4978 static void
4979 add_gfx9_modifiers(const struct amdgpu_device *adev,
4980 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4981 {
4982 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4983 	int pipe_xor_bits = min(8, pipes +
4984 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4985 	int bank_xor_bits = min(8 - pipe_xor_bits,
4986 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4987 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4988 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4989 
4990 
4991 	if (adev->family == AMDGPU_FAMILY_RV) {
4992 		/* Raven2 and later */
4993 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4994 
4995 		/*
4996 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4997 		 * doesn't support _D on DCN
4998 		 */
4999 
5000 		if (has_constant_encode) {
5001 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5002 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5003 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5004 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5005 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5006 				    AMD_FMT_MOD_SET(DCC, 1) |
5007 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5008 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5009 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5010 		}
5011 
5012 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5013 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5014 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5015 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5016 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5017 			    AMD_FMT_MOD_SET(DCC, 1) |
5018 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5019 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5020 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5021 
5022 		if (has_constant_encode) {
5023 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5024 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5025 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5026 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5027 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5028 				    AMD_FMT_MOD_SET(DCC, 1) |
5029 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5030 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5031 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5032 
5033 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5034 				    AMD_FMT_MOD_SET(RB, rb) |
5035 				    AMD_FMT_MOD_SET(PIPE, pipes));
5036 		}
5037 
5038 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5039 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5040 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5041 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5042 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5043 			    AMD_FMT_MOD_SET(DCC, 1) |
5044 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5045 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5046 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5047 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5048 			    AMD_FMT_MOD_SET(RB, rb) |
5049 			    AMD_FMT_MOD_SET(PIPE, pipes));
5050 	}
5051 
5052 	/*
5053 	 * Only supported for 64bpp on Raven, will be filtered on format in
5054 	 * dm_plane_format_mod_supported.
5055 	 */
5056 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5057 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5058 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5059 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5060 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5061 
5062 	if (adev->family == AMDGPU_FAMILY_RV) {
5063 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5065 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5066 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5067 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5068 	}
5069 
5070 	/*
5071 	 * Only supported for 64bpp on Raven, will be filtered on format in
5072 	 * dm_plane_format_mod_supported.
5073 	 */
5074 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5075 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5076 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5077 
5078 	if (adev->family == AMDGPU_FAMILY_RV) {
5079 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5080 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5081 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5082 	}
5083 }
5084 
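/*
 * GFX10.1 (Navi1x) modifiers. The swizzle addressing depends on the pipe
 * configuration, so ilog2(num_pipes) is baked into the modifier via
 * PIPE_XOR_BITS; entries are added roughly from the most featureful
 * (DCC-compressed R_X) down to the plain D/S swizzles used as fallbacks.
 */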
5085 static void
5086 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5087 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5088 {
5089 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5090 
5091 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5092 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5093 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5094 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5095 		    AMD_FMT_MOD_SET(DCC, 1) |
5096 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5097 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5098 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5099 
5100 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5101 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5102 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5103 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5104 		    AMD_FMT_MOD_SET(DCC, 1) |
5105 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5106 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5107 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5108 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5109 
5110 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5111 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5112 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5113 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5114 
5115 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5116 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5117 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5118 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5119 
5120 
5121 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5122 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5123 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5124 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5125 
5126 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5127 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5128 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5129 }
5130 
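/*
 * GFX10.3 (RB+) modifiers. Compared with GFX10.1 these additionally encode
 * the packer count (PACKERS) and advertise independent 128B DCC block
 * variants alongside the 64B ones.
 */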
5131 static void
5132 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5133 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5134 {
5135 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5136 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5137 
5138 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5139 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5140 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5141 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5142 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5143 		    AMD_FMT_MOD_SET(DCC, 1) |
5144 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5145 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5146 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5147 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5148 
5149 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5150 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5151 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5152 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5153 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5154 		    AMD_FMT_MOD_SET(DCC, 1) |
5155 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5156 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5157 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5158 
5159 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5160 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5161 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5162 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5163 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5164 		    AMD_FMT_MOD_SET(DCC, 1) |
5165 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5166 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5167 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5168 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5169 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5170 
5171 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5172 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5173 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5174 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5175 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5176 		    AMD_FMT_MOD_SET(DCC, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5178 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5179 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5180 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5181 
5182 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5183 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5184 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5185 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5186 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5187 
5188 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5189 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5190 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5191 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5192 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5193 
5194 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5195 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5196 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5197 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5198 
5199 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5200 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5201 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5202 }
5203 
5204 static int
5205 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5206 {
5207 	uint64_t size = 0, capacity = 128;
5208 	*mods = NULL;
5209 
5210 	/* We have not hooked up any pre-GFX9 modifiers. */
5211 	if (adev->family < AMDGPU_FAMILY_AI)
5212 		return 0;
5213 
5214 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5215 
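	/*
	 * Only LINEAR is advertised for the cursor plane; the list is still
	 * terminated with DRM_FORMAT_MOD_INVALID like any other modifier list.
	 */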
5216 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5217 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5218 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5219 		return *mods ? 0 : -ENOMEM;
5220 	}
5221 
5222 	switch (adev->family) {
5223 	case AMDGPU_FAMILY_AI:
5224 	case AMDGPU_FAMILY_RV:
5225 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5226 		break;
5227 	case AMDGPU_FAMILY_NV:
5228 	case AMDGPU_FAMILY_VGH:
5229 	case AMDGPU_FAMILY_YC:
5230 	case AMDGPU_FAMILY_GC_10_3_6:
5231 	case AMDGPU_FAMILY_GC_10_3_7:
5232 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5233 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5234 		else
5235 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5236 		break;
5237 	}
5238 
5239 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5240 
5241 	/* INVALID marks the end of the list. */
5242 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5243 
5244 	if (!*mods)
5245 		return -ENOMEM;
5246 
5247 	return 0;
5248 }
5249 
5250 static int
5251 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5252 					  const struct amdgpu_framebuffer *afb,
5253 					  const enum surface_pixel_format format,
5254 					  const enum dc_rotation_angle rotation,
5255 					  const struct plane_size *plane_size,
5256 					  union dc_tiling_info *tiling_info,
5257 					  struct dc_plane_dcc_param *dcc,
5258 					  struct dc_plane_address *address,
5259 					  const bool force_disable_dcc)
5260 {
5261 	const uint64_t modifier = afb->base.modifier;
5262 	int ret = 0;
5263 
5264 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5265 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5266 
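	/*
	 * For DCC-enabled modifiers, plane 1 of the framebuffer carries the
	 * DCC metadata: its offset and pitch become the meta address and
	 * meta pitch programmed into DC below.
	 */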
5267 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5268 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5269 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5270 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5271 
5272 		dcc->enable = 1;
5273 		dcc->meta_pitch = afb->base.pitches[1];
5274 		dcc->independent_64b_blks = independent_64b_blks;
5275 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5276 			if (independent_64b_blks && independent_128b_blks)
5277 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5278 			else if (independent_128b_blks)
5279 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5280 			else if (independent_64b_blks && !independent_128b_blks)
5281 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5282 			else
5283 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5284 		} else {
5285 			if (independent_64b_blks)
5286 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5287 			else
5288 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5289 		}
5290 
5291 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5292 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5293 	}
5294 
5295 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5296 	if (ret)
5297 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5298 
5299 	return ret;
5300 }
5301 
5302 static int
5303 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5304 			     const struct amdgpu_framebuffer *afb,
5305 			     const enum surface_pixel_format format,
5306 			     const enum dc_rotation_angle rotation,
5307 			     const uint64_t tiling_flags,
5308 			     union dc_tiling_info *tiling_info,
5309 			     struct plane_size *plane_size,
5310 			     struct dc_plane_dcc_param *dcc,
5311 			     struct dc_plane_address *address,
5312 			     bool tmz_surface,
5313 			     bool force_disable_dcc)
5314 {
5315 	const struct drm_framebuffer *fb = &afb->base;
5316 	int ret;
5317 
5318 	memset(tiling_info, 0, sizeof(*tiling_info));
5319 	memset(plane_size, 0, sizeof(*plane_size));
5320 	memset(dcc, 0, sizeof(*dcc));
5321 	memset(address, 0, sizeof(*address));
5322 
5323 	address->tmz_surface = tmz_surface;
5324 
5325 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5326 		uint64_t addr = afb->address + fb->offsets[0];
5327 
5328 		plane_size->surface_size.x = 0;
5329 		plane_size->surface_size.y = 0;
5330 		plane_size->surface_size.width = fb->width;
5331 		plane_size->surface_size.height = fb->height;
5332 		plane_size->surface_pitch =
5333 			fb->pitches[0] / fb->format->cpp[0];
5334 
5335 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5336 		address->grph.addr.low_part = lower_32_bits(addr);
5337 		address->grph.addr.high_part = upper_32_bits(addr);
5338 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5339 		uint64_t luma_addr = afb->address + fb->offsets[0];
5340 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5341 
5342 		plane_size->surface_size.x = 0;
5343 		plane_size->surface_size.y = 0;
5344 		plane_size->surface_size.width = fb->width;
5345 		plane_size->surface_size.height = fb->height;
5346 		plane_size->surface_pitch =
5347 			fb->pitches[0] / fb->format->cpp[0];
5348 
5349 		plane_size->chroma_size.x = 0;
5350 		plane_size->chroma_size.y = 0;
5351 		/* TODO: set these based on surface format */
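		/*
		 * All video formats handled today (NV12/NV21/P010) are 4:2:0,
		 * hence the fixed halving of width and height for the chroma
		 * plane.
		 */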
5352 		plane_size->chroma_size.width = fb->width / 2;
5353 		plane_size->chroma_size.height = fb->height / 2;
5354 
5355 		plane_size->chroma_pitch =
5356 			fb->pitches[1] / fb->format->cpp[1];
5357 
5358 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5359 		address->video_progressive.luma_addr.low_part =
5360 			lower_32_bits(luma_addr);
5361 		address->video_progressive.luma_addr.high_part =
5362 			upper_32_bits(luma_addr);
5363 		address->video_progressive.chroma_addr.low_part =
5364 			lower_32_bits(chroma_addr);
5365 		address->video_progressive.chroma_addr.high_part =
5366 			upper_32_bits(chroma_addr);
5367 	}
5368 
5369 	if (adev->family >= AMDGPU_FAMILY_AI) {
5370 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5371 								rotation, plane_size,
5372 								tiling_info, dcc,
5373 								address,
5374 								force_disable_dcc);
5375 		if (ret)
5376 			return ret;
5377 	} else {
5378 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5379 	}
5380 
5381 	return 0;
5382 }
5383 
5384 static void
5385 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5386 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5387 			       bool *global_alpha, int *global_alpha_value)
5388 {
5389 	*per_pixel_alpha = false;
5390 	*pre_multiplied_alpha = true;
5391 	*global_alpha = false;
5392 	*global_alpha_value = 0xff;
5393 
5394 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5395 		return;
5396 
5397 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5398 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5399 		static const uint32_t alpha_formats[] = {
5400 			DRM_FORMAT_ARGB8888,
5401 			DRM_FORMAT_RGBA8888,
5402 			DRM_FORMAT_ABGR8888,
5403 		};
5404 		uint32_t format = plane_state->fb->format->format;
5405 		unsigned int i;
5406 
5407 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5408 			if (format == alpha_formats[i]) {
5409 				*per_pixel_alpha = true;
5410 				break;
5411 			}
5412 		}
5413 
5414 		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5415 			*pre_multiplied_alpha = false;
5416 	}
5417 
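	/*
	 * drm_plane_state.alpha is a 16-bit value (0x0000-0xffff) while DC
	 * takes an 8-bit global alpha, so drop the low byte when converting.
	 */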
5418 	if (plane_state->alpha < 0xffff) {
5419 		*global_alpha = true;
5420 		*global_alpha_value = plane_state->alpha >> 8;
5421 	}
5422 }
5423 
5424 static int
5425 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5426 			    const enum surface_pixel_format format,
5427 			    enum dc_color_space *color_space)
5428 {
5429 	bool full_range;
5430 
5431 	*color_space = COLOR_SPACE_SRGB;
5432 
5433 	/* DRM color properties only affect non-RGB formats. */
5434 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5435 		return 0;
5436 
5437 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5438 
5439 	switch (plane_state->color_encoding) {
5440 	case DRM_COLOR_YCBCR_BT601:
5441 		if (full_range)
5442 			*color_space = COLOR_SPACE_YCBCR601;
5443 		else
5444 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5445 		break;
5446 
5447 	case DRM_COLOR_YCBCR_BT709:
5448 		if (full_range)
5449 			*color_space = COLOR_SPACE_YCBCR709;
5450 		else
5451 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5452 		break;
5453 
5454 	case DRM_COLOR_YCBCR_BT2020:
5455 		if (full_range)
5456 			*color_space = COLOR_SPACE_2020_YCBCR;
5457 		else
5458 			return -EINVAL;
5459 		break;
5460 
5461 	default:
5462 		return -EINVAL;
5463 	}
5464 
5465 	return 0;
5466 }
5467 
5468 static int
5469 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5470 			    const struct drm_plane_state *plane_state,
5471 			    const uint64_t tiling_flags,
5472 			    struct dc_plane_info *plane_info,
5473 			    struct dc_plane_address *address,
5474 			    bool tmz_surface,
5475 			    bool force_disable_dcc)
5476 {
5477 	const struct drm_framebuffer *fb = plane_state->fb;
5478 	const struct amdgpu_framebuffer *afb =
5479 		to_amdgpu_framebuffer(plane_state->fb);
5480 	int ret;
5481 
5482 	memset(plane_info, 0, sizeof(*plane_info));
5483 
5484 	switch (fb->format->format) {
5485 	case DRM_FORMAT_C8:
5486 		plane_info->format =
5487 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5488 		break;
5489 	case DRM_FORMAT_RGB565:
5490 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5491 		break;
5492 	case DRM_FORMAT_XRGB8888:
5493 	case DRM_FORMAT_ARGB8888:
5494 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5495 		break;
5496 	case DRM_FORMAT_XRGB2101010:
5497 	case DRM_FORMAT_ARGB2101010:
5498 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5499 		break;
5500 	case DRM_FORMAT_XBGR2101010:
5501 	case DRM_FORMAT_ABGR2101010:
5502 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5503 		break;
5504 	case DRM_FORMAT_XBGR8888:
5505 	case DRM_FORMAT_ABGR8888:
5506 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5507 		break;
5508 	case DRM_FORMAT_NV21:
5509 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5510 		break;
5511 	case DRM_FORMAT_NV12:
5512 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5513 		break;
5514 	case DRM_FORMAT_P010:
5515 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5516 		break;
5517 	case DRM_FORMAT_XRGB16161616F:
5518 	case DRM_FORMAT_ARGB16161616F:
5519 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5520 		break;
5521 	case DRM_FORMAT_XBGR16161616F:
5522 	case DRM_FORMAT_ABGR16161616F:
5523 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5524 		break;
5525 	case DRM_FORMAT_XRGB16161616:
5526 	case DRM_FORMAT_ARGB16161616:
5527 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5528 		break;
5529 	case DRM_FORMAT_XBGR16161616:
5530 	case DRM_FORMAT_ABGR16161616:
5531 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5532 		break;
5533 	default:
5534 		DRM_ERROR(
5535 			"Unsupported screen format %p4cc\n",
5536 			&fb->format->format);
5537 		return -EINVAL;
5538 	}
5539 
5540 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5541 	case DRM_MODE_ROTATE_0:
5542 		plane_info->rotation = ROTATION_ANGLE_0;
5543 		break;
5544 	case DRM_MODE_ROTATE_90:
5545 		plane_info->rotation = ROTATION_ANGLE_90;
5546 		break;
5547 	case DRM_MODE_ROTATE_180:
5548 		plane_info->rotation = ROTATION_ANGLE_180;
5549 		break;
5550 	case DRM_MODE_ROTATE_270:
5551 		plane_info->rotation = ROTATION_ANGLE_270;
5552 		break;
5553 	default:
5554 		plane_info->rotation = ROTATION_ANGLE_0;
5555 		break;
5556 	}
5557 
5558 	plane_info->visible = true;
5559 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5560 
5561 	plane_info->layer_index = 0;
5562 
5563 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5564 					  &plane_info->color_space);
5565 	if (ret)
5566 		return ret;
5567 
5568 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5569 					   plane_info->rotation, tiling_flags,
5570 					   &plane_info->tiling_info,
5571 					   &plane_info->plane_size,
5572 					   &plane_info->dcc, address, tmz_surface,
5573 					   force_disable_dcc);
5574 	if (ret)
5575 		return ret;
5576 
5577 	fill_blending_from_plane_state(
5578 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5579 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5580 
5581 	return 0;
5582 }
5583 
5584 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5585 				    struct dc_plane_state *dc_plane_state,
5586 				    struct drm_plane_state *plane_state,
5587 				    struct drm_crtc_state *crtc_state)
5588 {
5589 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5590 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5591 	struct dc_scaling_info scaling_info;
5592 	struct dc_plane_info plane_info;
5593 	int ret;
5594 	bool force_disable_dcc = false;
5595 
5596 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5597 	if (ret)
5598 		return ret;
5599 
5600 	dc_plane_state->src_rect = scaling_info.src_rect;
5601 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5602 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5603 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5604 
5605 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5606 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5607 					  afb->tiling_flags,
5608 					  &plane_info,
5609 					  &dc_plane_state->address,
5610 					  afb->tmz_surface,
5611 					  force_disable_dcc);
5612 	if (ret)
5613 		return ret;
5614 
5615 	dc_plane_state->format = plane_info.format;
5616 	dc_plane_state->color_space = plane_info.color_space;
5618 	dc_plane_state->plane_size = plane_info.plane_size;
5619 	dc_plane_state->rotation = plane_info.rotation;
5620 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5621 	dc_plane_state->stereo_format = plane_info.stereo_format;
5622 	dc_plane_state->tiling_info = plane_info.tiling_info;
5623 	dc_plane_state->visible = plane_info.visible;
5624 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5625 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5626 	dc_plane_state->global_alpha = plane_info.global_alpha;
5627 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5628 	dc_plane_state->dcc = plane_info.dcc;
5629 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5630 	dc_plane_state->flip_int_enabled = true;
5631 
5632 	/*
5633 	 * Always set input transfer function, since plane state is refreshed
5634 	 * every time.
5635 	 */
5636 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5637 	if (ret)
5638 		return ret;
5639 
5640 	return 0;
5641 }
5642 
5643 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5644 					   const struct dm_connector_state *dm_state,
5645 					   struct dc_stream_state *stream)
5646 {
5647 	enum amdgpu_rmx_type rmx_type;
5648 
5649 	struct rect src = { 0 }; /* viewport in composition space*/
5650 	struct rect dst = { 0 }; /* stream addressable area */
5651 
5652 	/* no mode. nothing to be done */
5653 	if (!mode)
5654 		return;
5655 
5656 	/* Full screen scaling by default */
5657 	src.width = mode->hdisplay;
5658 	src.height = mode->vdisplay;
5659 	dst.width = stream->timing.h_addressable;
5660 	dst.height = stream->timing.v_addressable;
5661 
5662 	if (dm_state) {
5663 		rmx_type = dm_state->scaling;
5664 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5665 			if (src.width * dst.height <
5666 					src.height * dst.width) {
5667 				/* height needs less upscaling/more downscaling */
5668 				dst.width = src.width *
5669 						dst.height / src.height;
5670 			} else {
5671 				/* width needs less upscaling/more downscaling */
5672 				dst.height = src.height *
5673 						dst.width / src.width;
5674 			}
5675 		} else if (rmx_type == RMX_CENTER) {
5676 			dst = src;
5677 		}
5678 
5679 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5680 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5681 
5682 		if (dm_state->underscan_enable) {
5683 			dst.x += dm_state->underscan_hborder / 2;
5684 			dst.y += dm_state->underscan_vborder / 2;
5685 			dst.width -= dm_state->underscan_hborder;
5686 			dst.height -= dm_state->underscan_vborder;
5687 		}
5688 	}
5689 
5690 	stream->src = src;
5691 	stream->dst = dst;
5692 
5693 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5694 		      dst.x, dst.y, dst.width, dst.height);
5695 
5696 }
5697 
5698 static enum dc_color_depth
5699 convert_color_depth_from_display_info(const struct drm_connector *connector,
5700 				      bool is_y420, int requested_bpc)
5701 {
5702 	uint8_t bpc;
5703 
5704 	if (is_y420) {
5705 		bpc = 8;
5706 
5707 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5708 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5709 			bpc = 16;
5710 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5711 			bpc = 12;
5712 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5713 			bpc = 10;
5714 	} else {
5715 		bpc = (uint8_t)connector->display_info.bpc;
5716 		/* Assume 8 bpc by default if no bpc is specified. */
5717 		bpc = bpc ? bpc : 8;
5718 	}
5719 
5720 	if (requested_bpc > 0) {
5721 		/*
5722 		 * Cap display bpc based on the user requested value.
5723 		 *
5724 		 * The value of state->max_bpc may not be correctly updated
5725 		 * depending on when the connector gets added to the state
5726 		 * or if this was called outside of atomic check, so it
5727 		 * can't be used directly.
5728 		 */
5729 		bpc = min_t(u8, bpc, requested_bpc);
5730 
5731 		/* Round down to the nearest even number. */
5732 		bpc = bpc - (bpc & 1);
5733 	}
5734 
5735 	switch (bpc) {
5736 	case 0:
5737 		/*
5738 		 * Temporary workaround: DRM doesn't parse color depth for
5739 		 * EDID revisions before 1.4.
5740 		 * TODO: Fix edid parsing
5741 		 */
5742 		return COLOR_DEPTH_888;
5743 	case 6:
5744 		return COLOR_DEPTH_666;
5745 	case 8:
5746 		return COLOR_DEPTH_888;
5747 	case 10:
5748 		return COLOR_DEPTH_101010;
5749 	case 12:
5750 		return COLOR_DEPTH_121212;
5751 	case 14:
5752 		return COLOR_DEPTH_141414;
5753 	case 16:
5754 		return COLOR_DEPTH_161616;
5755 	default:
5756 		return COLOR_DEPTH_UNDEFINED;
5757 	}
5758 }
5759 
5760 static enum dc_aspect_ratio
5761 get_aspect_ratio(const struct drm_display_mode *mode_in)
5762 {
5763 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5764 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5765 }
5766 
5767 static enum dc_color_space
5768 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5769 {
5770 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5771 
5772 	switch (dc_crtc_timing->pixel_encoding)	{
5773 	case PIXEL_ENCODING_YCBCR422:
5774 	case PIXEL_ENCODING_YCBCR444:
5775 	case PIXEL_ENCODING_YCBCR420:
5776 	{
5777 		/*
5778 		 * 27030 kHz is the separation point between HDTV and SDTV
5779 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5780 		 * respectively.
5781 		 */
5782 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5783 			if (dc_crtc_timing->flags.Y_ONLY)
5784 				color_space =
5785 					COLOR_SPACE_YCBCR709_LIMITED;
5786 			else
5787 				color_space = COLOR_SPACE_YCBCR709;
5788 		} else {
5789 			if (dc_crtc_timing->flags.Y_ONLY)
5790 				color_space =
5791 					COLOR_SPACE_YCBCR601_LIMITED;
5792 			else
5793 				color_space = COLOR_SPACE_YCBCR601;
5794 		}
5795 
5796 	}
5797 	break;
5798 	case PIXEL_ENCODING_RGB:
5799 		color_space = COLOR_SPACE_SRGB;
5800 		break;
5801 
5802 	default:
5803 		WARN_ON(1);
5804 		break;
5805 	}
5806 
5807 	return color_space;
5808 }
5809 
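/*
 * Walk down from the requested colour depth until the HDMI character rate
 * fits the sink's max_tmds_clock. Illustrative numbers only: a 3840x2160@60
 * stream has pix_clk_100hz = 5940000, i.e. a normalized clock of 594000 kHz;
 * at 10 bpc this becomes 594000 * 30 / 24 = 742500 kHz, which exceeds a
 * 600000 kHz TMDS limit, so the depth falls back to 8 bpc.
 */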
5810 static bool adjust_colour_depth_from_display_info(
5811 	struct dc_crtc_timing *timing_out,
5812 	const struct drm_display_info *info)
5813 {
5814 	enum dc_color_depth depth = timing_out->display_color_depth;
5815 	int normalized_clk;
5816 	do {
5817 		normalized_clk = timing_out->pix_clk_100hz / 10;
5818 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5819 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5820 			normalized_clk /= 2;
5821 		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5822 		switch (depth) {
5823 		case COLOR_DEPTH_888:
5824 			break;
5825 		case COLOR_DEPTH_101010:
5826 			normalized_clk = (normalized_clk * 30) / 24;
5827 			break;
5828 		case COLOR_DEPTH_121212:
5829 			normalized_clk = (normalized_clk * 36) / 24;
5830 			break;
5831 		case COLOR_DEPTH_161616:
5832 			normalized_clk = (normalized_clk * 48) / 24;
5833 			break;
5834 		default:
5835 			/* The above depths are the only ones valid for HDMI. */
5836 			return false;
5837 		}
5838 		if (normalized_clk <= info->max_tmds_clock) {
5839 			timing_out->display_color_depth = depth;
5840 			return true;
5841 		}
5842 	} while (--depth > COLOR_DEPTH_666);
5843 	return false;
5844 }
5845 
5846 static void fill_stream_properties_from_drm_display_mode(
5847 	struct dc_stream_state *stream,
5848 	const struct drm_display_mode *mode_in,
5849 	const struct drm_connector *connector,
5850 	const struct drm_connector_state *connector_state,
5851 	const struct dc_stream_state *old_stream,
5852 	int requested_bpc)
5853 {
5854 	struct dc_crtc_timing *timing_out = &stream->timing;
5855 	const struct drm_display_info *info = &connector->display_info;
5856 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5857 	struct hdmi_vendor_infoframe hv_frame;
5858 	struct hdmi_avi_infoframe avi_frame;
5859 
5860 	memset(&hv_frame, 0, sizeof(hv_frame));
5861 	memset(&avi_frame, 0, sizeof(avi_frame));
5862 
5863 	timing_out->h_border_left = 0;
5864 	timing_out->h_border_right = 0;
5865 	timing_out->v_border_top = 0;
5866 	timing_out->v_border_bottom = 0;
5867 	/* TODO: un-hardcode */
5868 	if (drm_mode_is_420_only(info, mode_in)
5869 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5870 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5871 	else if (drm_mode_is_420_also(info, mode_in)
5872 			&& aconnector->force_yuv420_output)
5873 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5874 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5875 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5876 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5877 	else
5878 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5879 
5880 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5881 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5882 		connector,
5883 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5884 		requested_bpc);
5885 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5886 	timing_out->hdmi_vic = 0;
5887 
5888 	if (old_stream) {
5889 		timing_out->vic = old_stream->timing.vic;
5890 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5891 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5892 	} else {
5893 		timing_out->vic = drm_match_cea_mode(mode_in);
5894 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5895 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5896 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5897 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5898 	}
5899 
5900 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5901 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5902 		timing_out->vic = avi_frame.video_code;
5903 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5904 		timing_out->hdmi_vic = hv_frame.vic;
5905 	}
5906 
5907 	if (is_freesync_video_mode(mode_in, aconnector)) {
5908 		timing_out->h_addressable = mode_in->hdisplay;
5909 		timing_out->h_total = mode_in->htotal;
5910 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5911 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5912 		timing_out->v_total = mode_in->vtotal;
5913 		timing_out->v_addressable = mode_in->vdisplay;
5914 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5915 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5916 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5917 	} else {
5918 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5919 		timing_out->h_total = mode_in->crtc_htotal;
5920 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5921 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5922 		timing_out->v_total = mode_in->crtc_vtotal;
5923 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5924 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5925 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5926 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5927 	}
5928 
5929 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5930 
5931 	stream->output_color_space = get_output_color_space(timing_out);
5932 
5933 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5934 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5935 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5936 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5937 		    drm_mode_is_420_also(info, mode_in) &&
5938 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5939 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5940 			adjust_colour_depth_from_display_info(timing_out, info);
5941 		}
5942 	}
5943 }
5944 
5945 static void fill_audio_info(struct audio_info *audio_info,
5946 			    const struct drm_connector *drm_connector,
5947 			    const struct dc_sink *dc_sink)
5948 {
5949 	int i = 0;
5950 	int cea_revision = 0;
5951 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5952 
5953 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5954 	audio_info->product_id = edid_caps->product_id;
5955 
5956 	cea_revision = drm_connector->display_info.cea_rev;
5957 
5958 	strscpy(audio_info->display_name,
5959 		edid_caps->display_name,
5960 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5961 
5962 	if (cea_revision >= 3) {
5963 		audio_info->mode_count = edid_caps->audio_mode_count;
5964 
5965 		for (i = 0; i < audio_info->mode_count; ++i) {
5966 			audio_info->modes[i].format_code =
5967 					(enum audio_format_code)
5968 					(edid_caps->audio_modes[i].format_code);
5969 			audio_info->modes[i].channel_count =
5970 					edid_caps->audio_modes[i].channel_count;
5971 			audio_info->modes[i].sample_rates.all =
5972 					edid_caps->audio_modes[i].sample_rate;
5973 			audio_info->modes[i].sample_size =
5974 					edid_caps->audio_modes[i].sample_size;
5975 		}
5976 	}
5977 
5978 	audio_info->flags.all = edid_caps->speaker_flags;
5979 
5980 	/* TODO: We only check the progressive mode; check the interlaced mode too */
5981 	if (drm_connector->latency_present[0]) {
5982 		audio_info->video_latency = drm_connector->video_latency[0];
5983 		audio_info->audio_latency = drm_connector->audio_latency[0];
5984 	}
5985 
5986 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5987 
5988 }
5989 
5990 static void
5991 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5992 				      struct drm_display_mode *dst_mode)
5993 {
5994 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5995 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5996 	dst_mode->crtc_clock = src_mode->crtc_clock;
5997 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5998 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5999 	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
6000 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6001 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6002 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6003 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6004 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6005 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6006 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6007 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6008 }
6009 
6010 static void
6011 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6012 					const struct drm_display_mode *native_mode,
6013 					bool scale_enabled)
6014 {
6015 	if (scale_enabled) {
6016 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6017 	} else if (native_mode->clock == drm_mode->clock &&
6018 			native_mode->htotal == drm_mode->htotal &&
6019 			native_mode->vtotal == drm_mode->vtotal) {
6020 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6021 	} else {
6022 		/* no scaling nor amdgpu inserted, no need to patch */
6023 		/* no scaling and no amdgpu-inserted mode, nothing to patch */
6024 }
6025 
6026 static struct dc_sink *
6027 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6028 {
6029 	struct dc_sink_init_data sink_init_data = { 0 };
6030 	struct dc_sink *sink = NULL;
6031 	sink_init_data.link = aconnector->dc_link;
6032 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6033 
6034 	sink = dc_sink_create(&sink_init_data);
6035 	if (!sink) {
6036 		DRM_ERROR("Failed to create sink!\n");
6037 		return NULL;
6038 	}
6039 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6040 
6041 	return sink;
6042 }
6043 
6044 static void set_multisync_trigger_params(
6045 		struct dc_stream_state *stream)
6046 {
6047 	struct dc_stream_state *master = NULL;
6048 
6049 	if (stream->triggered_crtc_reset.enabled) {
6050 		master = stream->triggered_crtc_reset.event_source;
6051 		stream->triggered_crtc_reset.event =
6052 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6053 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6054 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6055 	}
6056 }
6057 
6058 static void set_master_stream(struct dc_stream_state *stream_set[],
6059 			      int stream_count)
6060 {
6061 	int j, highest_rfr = 0, master_stream = 0;
6062 
6063 	for (j = 0;  j < stream_count; j++) {
6064 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6065 			int refresh_rate = 0;
6066 
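			/*
			 * pix_clk_100hz is in units of 100 Hz, so multiplying
			 * by 100 yields Hz; dividing by the pixels per frame
			 * (h_total * v_total) gives the refresh rate in Hz.
			 */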
6067 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6068 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6069 			if (refresh_rate > highest_rfr) {
6070 				highest_rfr = refresh_rate;
6071 				master_stream = j;
6072 			}
6073 		}
6074 	}
6075 	for (j = 0;  j < stream_count; j++) {
6076 		if (stream_set[j])
6077 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6078 	}
6079 }
6080 
6081 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6082 {
6083 	int i = 0;
6084 	struct dc_stream_state *stream;
6085 
6086 	if (context->stream_count < 2)
6087 		return;
6088 	for (i = 0; i < context->stream_count ; i++) {
6089 		if (!context->streams[i])
6090 			continue;
6091 		/*
6092 		 * TODO: add a function to read AMD VSDB bits and set
6093 		 * crtc_sync_master.multi_sync_enabled flag
6094 		 * For now it's set to false
6095 		 */
6096 	}
6097 
6098 	set_master_stream(context->streams, context->stream_count);
6099 
6100 	for (i = 0; i < context->stream_count ; i++) {
6101 		stream = context->streams[i];
6102 
6103 		if (!stream)
6104 			continue;
6105 
6106 		set_multisync_trigger_params(stream);
6107 	}
6108 }
6109 
6110 #if defined(CONFIG_DRM_AMD_DC_DCN)
6111 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6112 							struct dc_sink *sink, struct dc_stream_state *stream,
6113 							struct dsc_dec_dpcd_caps *dsc_caps)
6114 {
6115 	stream->timing.flags.DSC = 0;
6116 	dsc_caps->is_dsc_supported = false;
6117 
6118 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6119 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6120 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6121 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6122 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6123 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6124 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6125 				dsc_caps);
6126 	}
6127 }
6128 
6129 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6130 				    struct dc_sink *sink, struct dc_stream_state *stream,
6131 				    struct dsc_dec_dpcd_caps *dsc_caps,
6132 				    uint32_t max_dsc_target_bpp_limit_override)
6133 {
6134 	const struct dc_link_settings *verified_link_cap = NULL;
6135 	uint32_t link_bw_in_kbps;
6136 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6137 	struct dc *dc = sink->ctx->dc;
6138 	struct dc_dsc_bw_range bw_range = {0};
6139 	struct dc_dsc_config dsc_cfg = {0};
6140 
6141 	verified_link_cap = dc_link_get_link_cap(stream->link);
6142 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
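	/* DSC target bpp values are in 1/16 bpp units, so 8 * 16 means 8.0 bpp. */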
6143 	edp_min_bpp_x16 = 8 * 16;
6144 	edp_max_bpp_x16 = 8 * 16;
6145 
6146 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6147 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6148 
6149 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6150 		edp_min_bpp_x16 = edp_max_bpp_x16;
6151 
6152 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6153 				dc->debug.dsc_min_slice_height_override,
6154 				edp_min_bpp_x16, edp_max_bpp_x16,
6155 				dsc_caps,
6156 				&stream->timing,
6157 				&bw_range)) {
6158 
6159 		if (bw_range.max_kbps < link_bw_in_kbps) {
6160 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6161 					dsc_caps,
6162 					dc->debug.dsc_min_slice_height_override,
6163 					max_dsc_target_bpp_limit_override,
6164 					0,
6165 					&stream->timing,
6166 					&dsc_cfg)) {
6167 				stream->timing.dsc_cfg = dsc_cfg;
6168 				stream->timing.flags.DSC = 1;
6169 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6170 			}
6171 			return;
6172 		}
6173 	}
6174 
6175 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6176 				dsc_caps,
6177 				dc->debug.dsc_min_slice_height_override,
6178 				max_dsc_target_bpp_limit_override,
6179 				link_bw_in_kbps,
6180 				&stream->timing,
6181 				&dsc_cfg)) {
6182 		stream->timing.dsc_cfg = dsc_cfg;
6183 		stream->timing.flags.DSC = 1;
6184 	}
6185 }
6186 
6187 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6188 										struct dc_sink *sink, struct dc_stream_state *stream,
6189 										struct dsc_dec_dpcd_caps *dsc_caps)
6190 {
6191 	struct drm_connector *drm_connector = &aconnector->base;
6192 	uint32_t link_bandwidth_kbps;
6193 	uint32_t max_dsc_target_bpp_limit_override = 0;
6194 	struct dc *dc = sink->ctx->dc;
6195 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6196 	uint32_t dsc_max_supported_bw_in_kbps;
6197 
6198 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6199 							dc_link_get_link_cap(aconnector->dc_link));
6200 
6201 	if (stream->link && stream->link->local_sink)
6202 		max_dsc_target_bpp_limit_override =
6203 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6204 
6205 	/* Set DSC policy according to dsc_clock_en */
6206 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6207 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6208 
6209 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6210 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6211 
6212 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6213 
6214 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6215 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6216 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6217 						dsc_caps,
6218 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6219 						max_dsc_target_bpp_limit_override,
6220 						link_bandwidth_kbps,
6221 						&stream->timing,
6222 						&stream->timing.dsc_cfg)) {
6223 				stream->timing.flags.DSC = 1;
6224 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6225 								 __func__, drm_connector->name);
6226 			}
6227 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6228 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6229 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6230 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6231 
6232 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6233 					max_supported_bw_in_kbps > 0 &&
6234 					dsc_max_supported_bw_in_kbps > 0)
6235 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6236 						dsc_caps,
6237 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6238 						max_dsc_target_bpp_limit_override,
6239 						dsc_max_supported_bw_in_kbps,
6240 						&stream->timing,
6241 						&stream->timing.dsc_cfg)) {
6242 					stream->timing.flags.DSC = 1;
6243 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6244 									 __func__, drm_connector->name);
6245 				}
6246 		}
6247 	}
6248 
6249 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6250 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6251 		stream->timing.flags.DSC = 1;
6252 
6253 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6254 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6255 
6256 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6257 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6258 
6259 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6260 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6261 }
6262 #endif /* CONFIG_DRM_AMD_DC_DCN */
6263 
6264 /**
6265  * DOC: FreeSync Video
6266  *
6267  * When a userspace application wants to play a video, the content follows a
6268  * standard format definition that usually specifies the FPS for that format.
6269  * The below list illustrates some video format and the expected FPS,
6270  * The list below illustrates some common video formats and their
6271  * expected FPS:
6272  * - TV/NTSC (23.976 FPS)
6273  * - Cinema (24 FPS)
6274  * - TV/PAL (25 FPS)
6275  * - TV/NTSC (29.97 FPS)
6276  * - TV/NTSC (30 FPS)
6277  * - Cinema HFR (48 FPS)
6278  * - TV/PAL (50 FPS)
6279  * - Commonly used (60 FPS)
6280  * - Multiples of 24 (48,72,96,120 FPS)
6281  *
6282  * The list of standard video formats is not huge and can be added to the
6283  * connector modeset list beforehand. With that, userspace can leverage
6284  * FreeSync to extend the front porch in order to attain the target refresh
6285  * rate. Such a switch will happen seamlessly, without screen blanking or
6286  * reprogramming of the output in any other way. If the userspace requests a
6287  * modesetting change compatible with FreeSync modes that only differ in the
6288  * refresh rate, DC will skip the full update and avoid a blink during the
6289  * transition. For example, the video player can change the modesetting from
6290  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6291  * causing any display blink. This same concept can be applied to a mode
6292  * setting change.
6293  */
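/*
 * Illustrative arithmetic only (the numbers are not taken from the code):
 * with a 1920x1080 base mode of htotal = 2200, vtotal = 1125 and a
 * 148500 kHz pixel clock (60 Hz), the same signal can be presented at 48 Hz
 * by stretching the vertical front porch so that vtotal grows to roughly
 * 148500000 / (2200 * 48) ~= 1406 lines, leaving the pixel clock and the
 * horizontal timing untouched.
 */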
6294 static struct drm_display_mode *
6295 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6296 			  bool use_probed_modes)
6297 {
6298 	struct drm_display_mode *m, *m_pref = NULL;
6299 	u16 current_refresh, highest_refresh;
6300 	struct list_head *list_head = use_probed_modes ?
6301 						    &aconnector->base.probed_modes :
6302 						    &aconnector->base.modes;
6303 
6304 	if (aconnector->freesync_vid_base.clock != 0)
6305 		return &aconnector->freesync_vid_base;
6306 
6307 	/* Find the preferred mode */
6308 	list_for_each_entry (m, list_head, head) {
6309 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6310 			m_pref = m;
6311 			break;
6312 		}
6313 	}
6314 
6315 	if (!m_pref) {
6316 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6317 		m_pref = list_first_entry_or_null(
6318 			&aconnector->base.modes, struct drm_display_mode, head);
6319 		if (!m_pref) {
6320 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6321 			return NULL;
6322 		}
6323 	}
6324 
6325 	highest_refresh = drm_mode_vrefresh(m_pref);
6326 
6327 	/*
6328 	 * Find the mode with highest refresh rate with same resolution.
6329 	 * For some monitors, preferred mode is not the mode with highest
6330 	 * supported refresh rate.
6331 	 */
6332 	list_for_each_entry (m, list_head, head) {
6333 		current_refresh  = drm_mode_vrefresh(m);
6334 
6335 		if (m->hdisplay == m_pref->hdisplay &&
6336 		    m->vdisplay == m_pref->vdisplay &&
6337 		    highest_refresh < current_refresh) {
6338 			highest_refresh = current_refresh;
6339 			m_pref = m;
6340 		}
6341 	}
6342 
6343 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6344 	return m_pref;
6345 }
6346 
6347 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6348 				   struct amdgpu_dm_connector *aconnector)
6349 {
6350 	struct drm_display_mode *high_mode;
6351 	int timing_diff;
6352 
6353 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6354 	if (!high_mode || !mode)
6355 		return false;
6356 
6357 	timing_diff = high_mode->vtotal - mode->vtotal;
6358 
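	/*
	 * A FreeSync video mode differs from the base mode only by extra
	 * lines in the vertical front porch: vtotal, vsync_start and
	 * vsync_end must all shift by the same delta while every other
	 * timing parameter matches.
	 */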
6359 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6360 	    high_mode->hdisplay != mode->hdisplay ||
6361 	    high_mode->vdisplay != mode->vdisplay ||
6362 	    high_mode->hsync_start != mode->hsync_start ||
6363 	    high_mode->hsync_end != mode->hsync_end ||
6364 	    high_mode->htotal != mode->htotal ||
6365 	    high_mode->hskew != mode->hskew ||
6366 	    high_mode->vscan != mode->vscan ||
6367 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6368 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6369 		return false;
6370 	else
6371 		return true;
6372 }
6373 
6374 static struct dc_stream_state *
6375 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6376 		       const struct drm_display_mode *drm_mode,
6377 		       const struct dm_connector_state *dm_state,
6378 		       const struct dc_stream_state *old_stream,
6379 		       int requested_bpc)
6380 {
6381 	struct drm_display_mode *preferred_mode = NULL;
6382 	struct drm_connector *drm_connector;
6383 	const struct drm_connector_state *con_state =
6384 		dm_state ? &dm_state->base : NULL;
6385 	struct dc_stream_state *stream = NULL;
6386 	struct drm_display_mode mode = *drm_mode;
6387 	struct drm_display_mode saved_mode;
6388 	struct drm_display_mode *freesync_mode = NULL;
6389 	bool native_mode_found = false;
6390 	bool recalculate_timing = false;
6391 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6392 	int mode_refresh;
6393 	int preferred_refresh = 0;
6394 #if defined(CONFIG_DRM_AMD_DC_DCN)
6395 	struct dsc_dec_dpcd_caps dsc_caps;
6396 #endif
6397 	struct dc_sink *sink = NULL;
6398 
6399 	memset(&saved_mode, 0, sizeof(saved_mode));
6400 
6401 	if (aconnector == NULL) {
6402 		DRM_ERROR("aconnector is NULL!\n");
6403 		return stream;
6404 	}
6405 
6406 	drm_connector = &aconnector->base;
6407 
6408 	if (!aconnector->dc_sink) {
6409 		sink = create_fake_sink(aconnector);
6410 		if (!sink)
6411 			return stream;
6412 	} else {
6413 		sink = aconnector->dc_sink;
6414 		dc_sink_retain(sink);
6415 	}
6416 
6417 	stream = dc_create_stream_for_sink(sink);
6418 
6419 	if (stream == NULL) {
6420 		DRM_ERROR("Failed to create stream for sink!\n");
6421 		goto finish;
6422 	}
6423 
6424 	stream->dm_stream_context = aconnector;
6425 
6426 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6427 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6428 
6429 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6430 		/* Search for preferred mode */
6431 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6432 			native_mode_found = true;
6433 			break;
6434 		}
6435 	}
6436 	if (!native_mode_found)
6437 		preferred_mode = list_first_entry_or_null(
6438 				&aconnector->base.modes,
6439 				struct drm_display_mode,
6440 				head);
6441 
6442 	mode_refresh = drm_mode_vrefresh(&mode);
6443 
6444 	if (preferred_mode == NULL) {
6445 		/*
6446 		 * This may not be an error, the use case is when we have no
6447 		 * usermode calls to reset and set mode upon hotplug. In this
6448 		 * case, we call set mode ourselves to restore the previous mode
6449 		 * and the mode list may not be filled in yet.
6450 		 */
6451 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6452 	} else {
6453 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6454 		if (recalculate_timing) {
6455 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6456 			drm_mode_copy(&saved_mode, &mode);
6457 			drm_mode_copy(&mode, freesync_mode);
6458 		} else {
6459 			decide_crtc_timing_for_drm_display_mode(
6460 				&mode, preferred_mode, scale);
6461 
6462 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6463 		}
6464 	}
6465 
6466 	if (recalculate_timing)
6467 		drm_mode_set_crtcinfo(&saved_mode, 0);
6468 	else if (!dm_state)
6469 		drm_mode_set_crtcinfo(&mode, 0);
6470 
6471 	/*
6472 	 * If scaling is enabled and refresh rate didn't change
6473 	 * we copy the vic and polarities of the old timings
6474 	 */
6475 	if (!scale || mode_refresh != preferred_refresh)
6476 		fill_stream_properties_from_drm_display_mode(
6477 			stream, &mode, &aconnector->base, con_state, NULL,
6478 			requested_bpc);
6479 	else
6480 		fill_stream_properties_from_drm_display_mode(
6481 			stream, &mode, &aconnector->base, con_state, old_stream,
6482 			requested_bpc);
6483 
6484 #if defined(CONFIG_DRM_AMD_DC_DCN)
6485 	/* SST DSC determination policy */
6486 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6487 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6488 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6489 #endif
6490 
6491 	update_stream_scaling_settings(&mode, dm_state, stream);
6492 
6493 	fill_audio_info(
6494 		&stream->audio_info,
6495 		drm_connector,
6496 		sink);
6497 
6498 	update_stream_signal(stream, sink);
6499 
6500 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6501 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6502 
6503 	if (stream->link->psr_settings.psr_feature_enabled) {
6504 		//
6505 		// Decide whether the stream supports VSC SDP colorimetry
6506 		// before building the VSC info packet.
6507 		//
6508 		stream->use_vsc_sdp_for_colorimetry = false;
6509 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6510 			stream->use_vsc_sdp_for_colorimetry =
6511 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6512 		} else {
6513 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6514 				stream->use_vsc_sdp_for_colorimetry = true;
6515 		}
6516 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6517 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6518 
6519 	}
6520 finish:
6521 	dc_sink_release(sink);
6522 
6523 	return stream;
6524 }
6525 
6526 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6527 {
6528 	drm_crtc_cleanup(crtc);
6529 	kfree(crtc);
6530 }
6531 
6532 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6533 				  struct drm_crtc_state *state)
6534 {
6535 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6536 
6537 	/* TODO Destroy dc_stream objects are stream object is flattened */
6538 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6539 		dc_stream_release(cur->stream);
6540 
6541 
6542 	__drm_atomic_helper_crtc_destroy_state(state);
6543 
6544 
6545 	kfree(state);
6546 }
6547 
6548 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6549 {
6550 	struct dm_crtc_state *state;
6551 
6552 	if (crtc->state)
6553 		dm_crtc_destroy_state(crtc, crtc->state);
6554 
6555 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6556 	if (WARN_ON(!state))
6557 		return;
6558 
6559 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6560 }
6561 
6562 static struct drm_crtc_state *
6563 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6564 {
6565 	struct dm_crtc_state *state, *cur;
6566 
6567 	cur = to_dm_crtc_state(crtc->state);
6568 
6569 	if (WARN_ON(!crtc->state))
6570 		return NULL;
6571 
6572 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6573 	if (!state)
6574 		return NULL;
6575 
6576 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6577 
6578 	if (cur->stream) {
6579 		state->stream = cur->stream;
6580 		dc_stream_retain(state->stream);
6581 	}
6582 
6583 	state->active_planes = cur->active_planes;
6584 	state->vrr_infopacket = cur->vrr_infopacket;
6585 	state->abm_level = cur->abm_level;
6586 	state->vrr_supported = cur->vrr_supported;
6587 	state->freesync_config = cur->freesync_config;
6588 	state->cm_has_degamma = cur->cm_has_degamma;
6589 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6590 	state->force_dpms_off = cur->force_dpms_off;
6591 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6592 	/* TODO: Duplicate dc_stream after the stream object is flattened */
6593 	return &state->base;
6594 }
6595 
6596 #ifdef CONFIG_DEBUG_FS
6597 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6598 {
6599 	crtc_debugfs_init(crtc);
6600 
6601 	return 0;
6602 }
6603 #endif
6604 
6605 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6606 {
6607 	enum dc_irq_source irq_source;
6608 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6609 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6610 	int rc;
6611 
6612 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6613 
6614 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6615 
6616 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6617 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6618 	return rc;
6619 }
6620 
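/*
 * Enable or disable the VBLANK interrupt for a CRTC. The VUPDATE interrupt
 * is only kept on while VRR is active; when a vblank control workqueue is
 * present, the remaining display-core work is deferred to
 * vblank_control_worker() so it runs in process context.
 */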
6621 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6622 {
6623 	enum dc_irq_source irq_source;
6624 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6625 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6626 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6627 	struct amdgpu_display_manager *dm = &adev->dm;
6628 	struct vblank_control_work *work;
6629 	int rc = 0;
6630 
6631 	if (enable) {
6632 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6633 		if (amdgpu_dm_vrr_active(acrtc_state))
6634 			rc = dm_set_vupdate_irq(crtc, true);
6635 	} else {
6636 		/* vblank irq off -> vupdate irq off */
6637 		rc = dm_set_vupdate_irq(crtc, false);
6638 	}
6639 
6640 	if (rc)
6641 		return rc;
6642 
6643 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6644 
6645 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6646 		return -EBUSY;
6647 
6648 	if (amdgpu_in_reset(adev))
6649 		return 0;
6650 
6651 	if (dm->vblank_control_workqueue) {
6652 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6653 		if (!work)
6654 			return -ENOMEM;
6655 
6656 		INIT_WORK(&work->work, vblank_control_worker);
6657 		work->dm = dm;
6658 		work->acrtc = acrtc;
6659 		work->enable = enable;
6660 
6661 		if (acrtc_state->stream) {
6662 			dc_stream_retain(acrtc_state->stream);
6663 			work->stream = acrtc_state->stream;
6664 		}
6665 
6666 		queue_work(dm->vblank_control_workqueue, &work->work);
6667 	}
6668 
6669 	return 0;
6670 }
6671 
6672 static int dm_enable_vblank(struct drm_crtc *crtc)
6673 {
6674 	return dm_set_vblank(crtc, true);
6675 }
6676 
6677 static void dm_disable_vblank(struct drm_crtc *crtc)
6678 {
6679 	dm_set_vblank(crtc, false);
6680 }
6681 
6682 /* Only the options currently available to the driver are implemented */
6683 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6684 	.reset = dm_crtc_reset_state,
6685 	.destroy = amdgpu_dm_crtc_destroy,
6686 	.set_config = drm_atomic_helper_set_config,
6687 	.page_flip = drm_atomic_helper_page_flip,
6688 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6689 	.atomic_destroy_state = dm_crtc_destroy_state,
6690 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6691 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6692 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6693 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6694 	.enable_vblank = dm_enable_vblank,
6695 	.disable_vblank = dm_disable_vblank,
6696 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6697 #if defined(CONFIG_DEBUG_FS)
6698 	.late_register = amdgpu_dm_crtc_late_register,
6699 #endif
6700 };
6701 
6702 static enum drm_connector_status
6703 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6704 {
6705 	bool connected;
6706 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6707 
6708 	/*
6709 	 * Notes:
6710 	 * 1. This interface is NOT called in context of HPD irq.
6711 	 * 2. This interface *is called* in the context of a user-mode ioctl,
6712 	 * which makes it a bad place for *any* MST-related activity.
6713 	 */
6714 
6715 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6716 	    !aconnector->fake_enable)
6717 		connected = (aconnector->dc_sink != NULL);
6718 	else
6719 		connected = (aconnector->base.force == DRM_FORCE_ON);
6720 
6721 	update_subconnector_property(aconnector);
6722 
6723 	return (connected ? connector_status_connected :
6724 			connector_status_disconnected);
6725 }
6726 
6727 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6728 					    struct drm_connector_state *connector_state,
6729 					    struct drm_property *property,
6730 					    uint64_t val)
6731 {
6732 	struct drm_device *dev = connector->dev;
6733 	struct amdgpu_device *adev = drm_to_adev(dev);
6734 	struct dm_connector_state *dm_old_state =
6735 		to_dm_connector_state(connector->state);
6736 	struct dm_connector_state *dm_new_state =
6737 		to_dm_connector_state(connector_state);
6738 
6739 	int ret = -EINVAL;
6740 
6741 	if (property == dev->mode_config.scaling_mode_property) {
6742 		enum amdgpu_rmx_type rmx_type;
6743 
6744 		switch (val) {
6745 		case DRM_MODE_SCALE_CENTER:
6746 			rmx_type = RMX_CENTER;
6747 			break;
6748 		case DRM_MODE_SCALE_ASPECT:
6749 			rmx_type = RMX_ASPECT;
6750 			break;
6751 		case DRM_MODE_SCALE_FULLSCREEN:
6752 			rmx_type = RMX_FULL;
6753 			break;
6754 		case DRM_MODE_SCALE_NONE:
6755 		default:
6756 			rmx_type = RMX_OFF;
6757 			break;
6758 		}
6759 
6760 		if (dm_old_state->scaling == rmx_type)
6761 			return 0;
6762 
6763 		dm_new_state->scaling = rmx_type;
6764 		ret = 0;
6765 	} else if (property == adev->mode_info.underscan_hborder_property) {
6766 		dm_new_state->underscan_hborder = val;
6767 		ret = 0;
6768 	} else if (property == adev->mode_info.underscan_vborder_property) {
6769 		dm_new_state->underscan_vborder = val;
6770 		ret = 0;
6771 	} else if (property == adev->mode_info.underscan_property) {
6772 		dm_new_state->underscan_enable = val;
6773 		ret = 0;
6774 	} else if (property == adev->mode_info.abm_level_property) {
6775 		dm_new_state->abm_level = val;
6776 		ret = 0;
6777 	}
6778 
6779 	return ret;
6780 }
6781 
6782 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6783 					    const struct drm_connector_state *state,
6784 					    struct drm_property *property,
6785 					    uint64_t *val)
6786 {
6787 	struct drm_device *dev = connector->dev;
6788 	struct amdgpu_device *adev = drm_to_adev(dev);
6789 	struct dm_connector_state *dm_state =
6790 		to_dm_connector_state(state);
6791 	int ret = -EINVAL;
6792 
6793 	if (property == dev->mode_config.scaling_mode_property) {
6794 		switch (dm_state->scaling) {
6795 		case RMX_CENTER:
6796 			*val = DRM_MODE_SCALE_CENTER;
6797 			break;
6798 		case RMX_ASPECT:
6799 			*val = DRM_MODE_SCALE_ASPECT;
6800 			break;
6801 		case RMX_FULL:
6802 			*val = DRM_MODE_SCALE_FULLSCREEN;
6803 			break;
6804 		case RMX_OFF:
6805 		default:
6806 			*val = DRM_MODE_SCALE_NONE;
6807 			break;
6808 		}
6809 		ret = 0;
6810 	} else if (property == adev->mode_info.underscan_hborder_property) {
6811 		*val = dm_state->underscan_hborder;
6812 		ret = 0;
6813 	} else if (property == adev->mode_info.underscan_vborder_property) {
6814 		*val = dm_state->underscan_vborder;
6815 		ret = 0;
6816 	} else if (property == adev->mode_info.underscan_property) {
6817 		*val = dm_state->underscan_enable;
6818 		ret = 0;
6819 	} else if (property == adev->mode_info.abm_level_property) {
6820 		*val = dm_state->abm_level;
6821 		ret = 0;
6822 	}
6823 
6824 	return ret;
6825 }
6826 
6827 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6828 {
6829 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6830 
6831 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6832 }
6833 
6834 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6835 {
6836 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6837 	const struct dc_link *link = aconnector->dc_link;
6838 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6839 	struct amdgpu_display_manager *dm = &adev->dm;
6840 	int i;
6841 
6842 	/*
6843 	 * Call only if mst_mgr was initialized before, since it's not done
6844 	 * for all connector types.
6845 	 */
6846 	if (aconnector->mst_mgr.dev)
6847 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6848 
6849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6850 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6851 	for (i = 0; i < dm->num_of_edps; i++) {
6852 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6853 			backlight_device_unregister(dm->backlight_dev[i]);
6854 			dm->backlight_dev[i] = NULL;
6855 		}
6856 	}
6857 #endif
6858 
6859 	if (aconnector->dc_em_sink)
6860 		dc_sink_release(aconnector->dc_em_sink);
6861 	aconnector->dc_em_sink = NULL;
6862 	if (aconnector->dc_sink)
6863 		dc_sink_release(aconnector->dc_sink);
6864 	aconnector->dc_sink = NULL;
6865 
6866 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6867 	drm_connector_unregister(connector);
6868 	drm_connector_cleanup(connector);
6869 	if (aconnector->i2c) {
6870 		i2c_del_adapter(&aconnector->i2c->base);
6871 		kfree(aconnector->i2c);
6872 	}
6873 	kfree(aconnector->dm_dp_aux.aux.name);
6874 
6875 	kfree(connector);
6876 }
6877 
6878 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6879 {
6880 	struct dm_connector_state *state =
6881 		to_dm_connector_state(connector->state);
6882 
6883 	if (connector->state)
6884 		__drm_atomic_helper_connector_destroy_state(connector->state);
6885 
6886 	kfree(state);
6887 
6888 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6889 
6890 	if (state) {
6891 		state->scaling = RMX_OFF;
6892 		state->underscan_enable = false;
6893 		state->underscan_hborder = 0;
6894 		state->underscan_vborder = 0;
6895 		state->base.max_requested_bpc = 8;
6896 		state->vcpi_slots = 0;
6897 		state->pbn = 0;
6898 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6899 			state->abm_level = amdgpu_dm_abm_level;
6900 
6901 		__drm_atomic_helper_connector_reset(connector, &state->base);
6902 	}
6903 }
6904 
6905 struct drm_connector_state *
6906 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6907 {
6908 	struct dm_connector_state *state =
6909 		to_dm_connector_state(connector->state);
6910 
6911 	struct dm_connector_state *new_state =
6912 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6913 
6914 	if (!new_state)
6915 		return NULL;
6916 
6917 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6918 
6919 	new_state->freesync_capable = state->freesync_capable;
6920 	new_state->abm_level = state->abm_level;
6921 	new_state->scaling = state->scaling;
6922 	new_state->underscan_enable = state->underscan_enable;
6923 	new_state->underscan_hborder = state->underscan_hborder;
6924 	new_state->underscan_vborder = state->underscan_vborder;
6925 	new_state->vcpi_slots = state->vcpi_slots;
6926 	new_state->pbn = state->pbn;
6927 	return &new_state->base;
6928 }
6929 
6930 static int
6931 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6932 {
6933 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6934 		to_amdgpu_dm_connector(connector);
6935 	int r;
6936 
6937 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6938 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6939 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6940 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6941 		if (r)
6942 			return r;
6943 	}
6944 
6945 #if defined(CONFIG_DEBUG_FS)
6946 	connector_debugfs_init(amdgpu_dm_connector);
6947 #endif
6948 
6949 	return 0;
6950 }
6951 
6952 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6953 	.reset = amdgpu_dm_connector_funcs_reset,
6954 	.detect = amdgpu_dm_connector_detect,
6955 	.fill_modes = drm_helper_probe_single_connector_modes,
6956 	.destroy = amdgpu_dm_connector_destroy,
6957 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6958 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6959 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6960 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6961 	.late_register = amdgpu_dm_connector_late_register,
6962 	.early_unregister = amdgpu_dm_connector_unregister
6963 };
6964 
6965 static int get_modes(struct drm_connector *connector)
6966 {
6967 	return amdgpu_dm_connector_get_modes(connector);
6968 }
6969 
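/*
 * Create an emulated (virtual) DC sink from the EDID blob attached to the
 * connector, for use when the connector state is forced by the user rather
 * than detected via hotplug.
 */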
6970 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6971 {
6972 	struct dc_sink_init_data init_params = {
6973 			.link = aconnector->dc_link,
6974 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6975 	};
6976 	struct edid *edid;
6977 
6978 	if (!aconnector->base.edid_blob_ptr) {
6979 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6980 				aconnector->base.name);
6981 
6982 		aconnector->base.force = DRM_FORCE_OFF;
6983 		aconnector->base.override_edid = false;
6984 		return;
6985 	}
6986 
6987 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6988 
6989 	aconnector->edid = edid;
6990 
6991 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6992 		aconnector->dc_link,
6993 		(uint8_t *)edid,
6994 		(edid->extensions + 1) * EDID_LENGTH,
6995 		&init_params);
6996 
6997 	if (aconnector->base.force == DRM_FORCE_ON) {
6998 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6999 		aconnector->dc_link->local_sink :
7000 		aconnector->dc_em_sink;
7001 		dc_sink_retain(aconnector->dc_sink);
7002 	}
7003 }
7004 
7005 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7006 {
7007 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7008 
7009 	/*
7010 	 * In case of a headless boot with force on for a DP managed connector,
7011 	 * these settings have to be != 0 to get an initial modeset.
7012 	 */
7013 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7014 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7015 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7016 	}
7017 
7018 
7019 	aconnector->base.override_edid = true;
7020 	create_eml_sink(aconnector);
7021 }
7022 
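/*
 * Create a DC stream for the sink and validate it. On validation failure
 * the requested bpc is lowered in steps of 2 (down to 6) and the stream is
 * rebuilt; if the encoder still rejects it, one more attempt is made with
 * YCbCr 4:2:0 output forced.
 */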
7023 struct dc_stream_state *
7024 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7025 				const struct drm_display_mode *drm_mode,
7026 				const struct dm_connector_state *dm_state,
7027 				const struct dc_stream_state *old_stream)
7028 {
7029 	struct drm_connector *connector = &aconnector->base;
7030 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7031 	struct dc_stream_state *stream;
7032 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7033 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7034 	enum dc_status dc_result = DC_OK;
7035 
7036 	do {
7037 		stream = create_stream_for_sink(aconnector, drm_mode,
7038 						dm_state, old_stream,
7039 						requested_bpc);
7040 		if (stream == NULL) {
7041 			DRM_ERROR("Failed to create stream for sink!\n");
7042 			break;
7043 		}
7044 
7045 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7046 
7047 		if (dc_result != DC_OK) {
7048 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7049 				      drm_mode->hdisplay,
7050 				      drm_mode->vdisplay,
7051 				      drm_mode->clock,
7052 				      dc_result,
7053 				      dc_status_to_str(dc_result));
7054 
7055 			dc_stream_release(stream);
7056 			stream = NULL;
7057 			requested_bpc -= 2; /* lower bpc to retry validation */
7058 		}
7059 
7060 	} while (stream == NULL && requested_bpc >= 6);
7061 
7062 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7063 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7064 
7065 		aconnector->force_yuv420_output = true;
7066 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7067 						dm_state, old_stream);
7068 		aconnector->force_yuv420_output = false;
7069 	}
7070 
7071 	return stream;
7072 }
7073 
7074 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7075 				   struct drm_display_mode *mode)
7076 {
7077 	int result = MODE_ERROR;
7078 	struct dc_sink *dc_sink;
7079 	/* TODO: Unhardcode stream count */
7080 	struct dc_stream_state *stream;
7081 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7082 
7083 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7084 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7085 		return result;
7086 
7087 	/*
7088 	 * Only run this the first time mode_valid is called to initialize
7089 	 * EDID mgmt
7090 	 */
7091 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7092 		!aconnector->dc_em_sink)
7093 		handle_edid_mgmt(aconnector);
7094 
7095 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7096 
7097 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7098 				aconnector->base.force != DRM_FORCE_ON) {
7099 		DRM_ERROR("dc_sink is NULL!\n");
7100 		goto fail;
7101 	}
7102 
7103 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7104 	if (stream) {
7105 		dc_stream_release(stream);
7106 		result = MODE_OK;
7107 	}
7108 
7109 fail:
7110 	/* TODO: error handling */
7111 	return result;
7112 }
7113 
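/*
 * Pack the connector's HDR static metadata into a DC info packet. The
 * 26-byte HDMI DRM infoframe payload is reused as-is; only the header
 * differs between the HDMI infoframe and the DP/eDP SDP variants.
 */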
7114 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7115 				struct dc_info_packet *out)
7116 {
7117 	struct hdmi_drm_infoframe frame;
7118 	unsigned char buf[30]; /* 26 + 4 */
7119 	ssize_t len;
7120 	int ret, i;
7121 
7122 	memset(out, 0, sizeof(*out));
7123 
7124 	if (!state->hdr_output_metadata)
7125 		return 0;
7126 
7127 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7128 	if (ret)
7129 		return ret;
7130 
7131 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7132 	if (len < 0)
7133 		return (int)len;
7134 
7135 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7136 	if (len != 30)
7137 		return -EINVAL;
7138 
7139 	/* Prepare the infopacket for DC. */
7140 	switch (state->connector->connector_type) {
7141 	case DRM_MODE_CONNECTOR_HDMIA:
7142 		out->hb0 = 0x87; /* type */
7143 		out->hb1 = 0x01; /* version */
7144 		out->hb2 = 0x1A; /* length */
7145 		out->sb[0] = buf[3]; /* checksum */
7146 		i = 1;
7147 		break;
7148 
7149 	case DRM_MODE_CONNECTOR_DisplayPort:
7150 	case DRM_MODE_CONNECTOR_eDP:
7151 		out->hb0 = 0x00; /* sdp id, zero */
7152 		out->hb1 = 0x87; /* type */
7153 		out->hb2 = 0x1D; /* payload len - 1 */
7154 		out->hb3 = (0x13 << 2); /* sdp version */
7155 		out->sb[0] = 0x01; /* version */
7156 		out->sb[1] = 0x1A; /* length */
7157 		i = 2;
7158 		break;
7159 
7160 	default:
7161 		return -EINVAL;
7162 	}
7163 
7164 	memcpy(&out->sb[i], &buf[4], 26);
7165 	out->valid = true;
7166 
7167 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7168 		       sizeof(out->sb), false);
7169 
7170 	return 0;
7171 }
7172 
7173 static int
7174 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7175 				 struct drm_atomic_state *state)
7176 {
7177 	struct drm_connector_state *new_con_state =
7178 		drm_atomic_get_new_connector_state(state, conn);
7179 	struct drm_connector_state *old_con_state =
7180 		drm_atomic_get_old_connector_state(state, conn);
7181 	struct drm_crtc *crtc = new_con_state->crtc;
7182 	struct drm_crtc_state *new_crtc_state;
7183 	int ret;
7184 
7185 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7186 
7187 	if (!crtc)
7188 		return 0;
7189 
7190 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7191 		struct dc_info_packet hdr_infopacket;
7192 
7193 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7194 		if (ret)
7195 			return ret;
7196 
7197 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7198 		if (IS_ERR(new_crtc_state))
7199 			return PTR_ERR(new_crtc_state);
7200 
7201 		/*
7202 		 * DC considers the stream backends changed if the
7203 		 * static metadata changes. Forcing the modeset also
7204 		 * gives a simple way for userspace to switch from
7205 		 * 8bpc to 10bpc when setting the metadata to enter
7206 		 * or exit HDR.
7207 		 *
7208 		 * Changing the static metadata after it's been
7209 		 * set is permissible, however. So only force a
7210 		 * modeset if we're entering or exiting HDR.
7211 		 */
7212 		new_crtc_state->mode_changed =
7213 			!old_con_state->hdr_output_metadata ||
7214 			!new_con_state->hdr_output_metadata;
7215 	}
7216 
7217 	return 0;
7218 }
7219 
7220 static const struct drm_connector_helper_funcs
7221 amdgpu_dm_connector_helper_funcs = {
7222 	/*
7223 	 * If a second, bigger display is hotplugged in FB console mode, its higher
7224 	 * resolution modes will be filtered out by drm_mode_validate_size(), and
7225 	 * those modes are missing after the user starts lightdm. So we need to renew
7226 	 * the modes list in the get_modes callback, not just return the modes count.
7227 	 */
7228 	.get_modes = get_modes,
7229 	.mode_valid = amdgpu_dm_connector_mode_valid,
7230 	.atomic_check = amdgpu_dm_connector_atomic_check,
7231 };
7232 
7233 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7234 {
7235 }
7236 
7237 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7238 {
7239 	struct drm_atomic_state *state = new_crtc_state->state;
7240 	struct drm_plane *plane;
7241 	int num_active = 0;
7242 
7243 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7244 		struct drm_plane_state *new_plane_state;
7245 
7246 		/* Cursor planes are "fake". */
7247 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7248 			continue;
7249 
7250 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7251 
7252 		if (!new_plane_state) {
7253 			/*
7254 			 * The plane is enabled on the CRTC and hasn't changed
7255 			 * state. This means that it previously passed
7256 			 * validation and is therefore enabled.
7257 			 */
7258 			num_active += 1;
7259 			continue;
7260 		}
7261 
7262 		/* We need a framebuffer to be considered enabled. */
7263 		num_active += (new_plane_state->fb != NULL);
7264 	}
7265 
7266 	return num_active;
7267 }
7268 
7269 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7270 					 struct drm_crtc_state *new_crtc_state)
7271 {
7272 	struct dm_crtc_state *dm_new_crtc_state =
7273 		to_dm_crtc_state(new_crtc_state);
7274 
7275 	dm_new_crtc_state->active_planes = 0;
7276 
7277 	if (!dm_new_crtc_state->stream)
7278 		return;
7279 
7280 	dm_new_crtc_state->active_planes =
7281 		count_crtc_active_planes(new_crtc_state);
7282 }
7283 
7284 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7285 				       struct drm_atomic_state *state)
7286 {
7287 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7288 									  crtc);
7289 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7290 	struct dc *dc = adev->dm.dc;
7291 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7292 	int ret = -EINVAL;
7293 
7294 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7295 
7296 	dm_update_crtc_active_planes(crtc, crtc_state);
7297 
7298 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7299 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7300 		return ret;
7301 	}
7302 
7303 	/*
7304 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7305 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7306 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7307 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7308 	 */
7309 	if (crtc_state->enable &&
7310 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7311 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7312 		return -EINVAL;
7313 	}
7314 
7315 	/* In some use cases, like reset, no stream is attached */
7316 	if (!dm_crtc_state->stream)
7317 		return 0;
7318 
7319 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7320 		return 0;
7321 
7322 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7323 	return ret;
7324 }
7325 
7326 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7327 				      const struct drm_display_mode *mode,
7328 				      struct drm_display_mode *adjusted_mode)
7329 {
7330 	return true;
7331 }
7332 
7333 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7334 	.disable = dm_crtc_helper_disable,
7335 	.atomic_check = dm_crtc_helper_atomic_check,
7336 	.mode_fixup = dm_crtc_helper_mode_fixup,
7337 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7338 };
7339 
7340 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7341 {
7342 
7343 }
7344 
7345 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7346 {
7347 	switch (display_color_depth) {
7348 	case COLOR_DEPTH_666:
7349 		return 6;
7350 	case COLOR_DEPTH_888:
7351 		return 8;
7352 	case COLOR_DEPTH_101010:
7353 		return 10;
7354 	case COLOR_DEPTH_121212:
7355 		return 12;
7356 	case COLOR_DEPTH_141414:
7357 		return 14;
7358 	case COLOR_DEPTH_161616:
7359 		return 16;
7360 	default:
7361 		break;
7362 	}
7363 	return 0;
7364 }
7365 
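/*
 * For MST connectors, translate the adjusted mode into a payload bandwidth
 * number (PBN) from its pixel clock and effective bpp, then ask the MST
 * manager for the VCPI time slots needed to carry that payload. Non-MST
 * connectors are left untouched.
 */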
7366 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7367 					  struct drm_crtc_state *crtc_state,
7368 					  struct drm_connector_state *conn_state)
7369 {
7370 	struct drm_atomic_state *state = crtc_state->state;
7371 	struct drm_connector *connector = conn_state->connector;
7372 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7373 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7374 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7375 	struct drm_dp_mst_topology_mgr *mst_mgr;
7376 	struct drm_dp_mst_port *mst_port;
7377 	enum dc_color_depth color_depth;
7378 	int clock, bpp = 0;
7379 	bool is_y420 = false;
7380 
7381 	if (!aconnector->port || !aconnector->dc_sink)
7382 		return 0;
7383 
7384 	mst_port = aconnector->port;
7385 	mst_mgr = &aconnector->mst_port->mst_mgr;
7386 
7387 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7388 		return 0;
7389 
7390 	if (!state->duplicated) {
7391 		int max_bpc = conn_state->max_requested_bpc;
7392 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7393 				aconnector->force_yuv420_output;
7394 		color_depth = convert_color_depth_from_display_info(connector,
7395 								    is_y420,
7396 								    max_bpc);
7397 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7398 		clock = adjusted_mode->clock;
7399 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7400 	}
7401 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7402 									   mst_mgr,
7403 									   mst_port,
7404 									   dm_new_connector_state->pbn,
7405 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7406 	if (dm_new_connector_state->vcpi_slots < 0) {
7407 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7408 		return dm_new_connector_state->vcpi_slots;
7409 	}
7410 	return 0;
7411 }
7412 
7413 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7414 	.disable = dm_encoder_helper_disable,
7415 	.atomic_check = dm_encoder_helper_atomic_check
7416 };
7417 
7418 #if defined(CONFIG_DRM_AMD_DC_DCN)
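/*
 * Match each MST connector in the atomic state to its DC stream and to the
 * PBN precomputed by the DSC fairness code, then enable or disable DSC on
 * the MST port accordingly and record the resulting PBN and VCPI slot count
 * in the connector state.
 */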
7419 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7420 					    struct dc_state *dc_state,
7421 					    struct dsc_mst_fairness_vars *vars)
7422 {
7423 	struct dc_stream_state *stream = NULL;
7424 	struct drm_connector *connector;
7425 	struct drm_connector_state *new_con_state;
7426 	struct amdgpu_dm_connector *aconnector;
7427 	struct dm_connector_state *dm_conn_state;
7428 	int i, j;
7429 	int vcpi, pbn_div, pbn, slot_num = 0;
7430 
7431 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7432 
7433 		aconnector = to_amdgpu_dm_connector(connector);
7434 
7435 		if (!aconnector->port)
7436 			continue;
7437 
7438 		if (!new_con_state || !new_con_state->crtc)
7439 			continue;
7440 
7441 		dm_conn_state = to_dm_connector_state(new_con_state);
7442 
7443 		for (j = 0; j < dc_state->stream_count; j++) {
7444 			stream = dc_state->streams[j];
7445 			if (!stream)
7446 				continue;
7447 
7448 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7449 				break;
7450 
7451 			stream = NULL;
7452 		}
7453 
7454 		if (!stream)
7455 			continue;
7456 
7457 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7458 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7459 		for (j = 0; j < dc_state->stream_count; j++) {
7460 			if (vars[j].aconnector == aconnector) {
7461 				pbn = vars[j].pbn;
7462 				break;
7463 			}
7464 		}
7465 
7466 		if (j == dc_state->stream_count)
7467 			continue;
7468 
7469 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7470 
7471 		if (stream->timing.flags.DSC != 1) {
7472 			dm_conn_state->pbn = pbn;
7473 			dm_conn_state->vcpi_slots = slot_num;
7474 
7475 			drm_dp_mst_atomic_enable_dsc(state,
7476 						     aconnector->port,
7477 						     dm_conn_state->pbn,
7478 						     0,
7479 						     false);
7480 			continue;
7481 		}
7482 
7483 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7484 						    aconnector->port,
7485 						    pbn, pbn_div,
7486 						    true);
7487 		if (vcpi < 0)
7488 			return vcpi;
7489 
7490 		dm_conn_state->pbn = pbn;
7491 		dm_conn_state->vcpi_slots = vcpi;
7492 	}
7493 	return 0;
7494 }
7495 #endif
7496 
7497 static void dm_drm_plane_reset(struct drm_plane *plane)
7498 {
7499 	struct dm_plane_state *amdgpu_state = NULL;
7500 
7501 	if (plane->state)
7502 		plane->funcs->atomic_destroy_state(plane, plane->state);
7503 
7504 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7505 	WARN_ON(amdgpu_state == NULL);
7506 
7507 	if (amdgpu_state)
7508 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7509 }
7510 
7511 static struct drm_plane_state *
7512 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7513 {
7514 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7515 
7516 	old_dm_plane_state = to_dm_plane_state(plane->state);
7517 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7518 	if (!dm_plane_state)
7519 		return NULL;
7520 
7521 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7522 
7523 	if (old_dm_plane_state->dc_state) {
7524 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7525 		dc_plane_state_retain(dm_plane_state->dc_state);
7526 	}
7527 
7528 	return &dm_plane_state->base;
7529 }
7530 
7531 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7532 				struct drm_plane_state *state)
7533 {
7534 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7535 
7536 	if (dm_plane_state->dc_state)
7537 		dc_plane_state_release(dm_plane_state->dc_state);
7538 
7539 	drm_atomic_helper_plane_destroy_state(plane, state);
7540 }
7541 
7542 static const struct drm_plane_funcs dm_plane_funcs = {
7543 	.update_plane	= drm_atomic_helper_update_plane,
7544 	.disable_plane	= drm_atomic_helper_disable_plane,
7545 	.destroy	= drm_primary_helper_destroy,
7546 	.reset = dm_drm_plane_reset,
7547 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7548 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7549 	.format_mod_supported = dm_plane_format_mod_supported,
7550 };
7551 
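/*
 * Prepare a framebuffer for scanout: reserve its backing buffer object,
 * make room for a fence, pin it to a display-capable domain (VRAM for
 * cursors), bind it into the GART and record the resulting GPU address in
 * the amdgpu_framebuffer.
 */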
7552 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7553 				      struct drm_plane_state *new_state)
7554 {
7555 	struct amdgpu_framebuffer *afb;
7556 	struct drm_gem_object *obj;
7557 	struct amdgpu_device *adev;
7558 	struct amdgpu_bo *rbo;
7559 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7560 	uint32_t domain;
7561 	int r;
7562 
7563 	if (!new_state->fb) {
7564 		DRM_DEBUG_KMS("No FB bound\n");
7565 		return 0;
7566 	}
7567 
7568 	afb = to_amdgpu_framebuffer(new_state->fb);
7569 	obj = new_state->fb->obj[0];
7570 	rbo = gem_to_amdgpu_bo(obj);
7571 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7572 
7573 	r = amdgpu_bo_reserve(rbo, true);
7574 	if (r) {
7575 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7576 		return r;
7577 	}
7578 
7579 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7580 	if (r) {
7581 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7582 		goto error_unlock;
7583 	}
7584 
7585 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7586 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7587 	else
7588 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7589 
7590 	r = amdgpu_bo_pin(rbo, domain);
7591 	if (unlikely(r != 0)) {
7592 		if (r != -ERESTARTSYS)
7593 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7594 		goto error_unlock;
7595 	}
7596 
7597 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7598 	if (unlikely(r != 0)) {
7599 		DRM_ERROR("%p bind failed\n", rbo);
7600 		goto error_unpin;
7601 	}
7602 
7603 	r = drm_gem_plane_helper_prepare_fb(plane, new_state);
7604 	if (unlikely(r != 0))
7605 		goto error_unpin;
7606 
7607 	amdgpu_bo_unreserve(rbo);
7608 
7609 	afb->address = amdgpu_bo_gpu_offset(rbo);
7610 
7611 	amdgpu_bo_ref(rbo);
7612 
7613 	/*
7614 	 * We don't do surface updates on planes that have been newly created,
7615 	 * but we also don't have the afb->address during atomic check.
7616 	 *
7617 	 * Fill in buffer attributes depending on the address here, but only on
7618 	 * newly created planes since they're not being used by DC yet and this
7619 	 * won't modify global state.
7620 	 */
7621 	dm_plane_state_old = to_dm_plane_state(plane->state);
7622 	dm_plane_state_new = to_dm_plane_state(new_state);
7623 
7624 	if (dm_plane_state_new->dc_state &&
7625 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7626 		struct dc_plane_state *plane_state =
7627 			dm_plane_state_new->dc_state;
7628 		bool force_disable_dcc = !plane_state->dcc.enable;
7629 
7630 		fill_plane_buffer_attributes(
7631 			adev, afb, plane_state->format, plane_state->rotation,
7632 			afb->tiling_flags,
7633 			&plane_state->tiling_info, &plane_state->plane_size,
7634 			&plane_state->dcc, &plane_state->address,
7635 			afb->tmz_surface, force_disable_dcc);
7636 	}
7637 
7638 	return 0;
7639 
7640 error_unpin:
7641 	amdgpu_bo_unpin(rbo);
7642 
7643 error_unlock:
7644 	amdgpu_bo_unreserve(rbo);
7645 	return r;
7646 }
7647 
7648 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7649 				       struct drm_plane_state *old_state)
7650 {
7651 	struct amdgpu_bo *rbo;
7652 	int r;
7653 
7654 	if (!old_state->fb)
7655 		return;
7656 
7657 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7658 	r = amdgpu_bo_reserve(rbo, false);
7659 	if (unlikely(r)) {
7660 		DRM_ERROR("failed to reserve rbo before unpin\n");
7661 		return;
7662 	}
7663 
7664 	amdgpu_bo_unpin(rbo);
7665 	amdgpu_bo_unreserve(rbo);
7666 	amdgpu_bo_unref(&rbo);
7667 }
7668 
7669 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7670 				       struct drm_crtc_state *new_crtc_state)
7671 {
7672 	struct drm_framebuffer *fb = state->fb;
7673 	int min_downscale, max_upscale;
7674 	int min_scale = 0;
7675 	int max_scale = INT_MAX;
7676 
7677 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7678 	if (fb && state->crtc) {
7679 		/* Validate viewport to cover the case when only the position changes */
7680 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7681 			int viewport_width = state->crtc_w;
7682 			int viewport_height = state->crtc_h;
7683 
7684 			if (state->crtc_x < 0)
7685 				viewport_width += state->crtc_x;
7686 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7687 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7688 
7689 			if (state->crtc_y < 0)
7690 				viewport_height += state->crtc_y;
7691 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7692 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7693 
7694 			if (viewport_width < 0 || viewport_height < 0) {
7695 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7696 				return -EINVAL;
7697 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width because of pipe-split. */
7698 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7699 				return -EINVAL;
7700 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7701 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7702 				return -EINVAL;
7703 			}
7704 
7705 		}
7706 
7707 		/* Get min/max allowed scaling factors from plane caps. */
7708 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7709 					     &min_downscale, &max_upscale);
7710 		/*
7711 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7712 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7713 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
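		 * For example, with an assumed 16x max_upscale of 16000,
		 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16.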
7714 		 */
7715 		min_scale = (1000 << 16) / max_upscale;
7716 		max_scale = (1000 << 16) / min_downscale;
7717 	}
7718 
7719 	return drm_atomic_helper_check_plane_state(
7720 		state, new_crtc_state, min_scale, max_scale, true, true);
7721 }
7722 
7723 static int dm_plane_atomic_check(struct drm_plane *plane,
7724 				 struct drm_atomic_state *state)
7725 {
7726 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7727 										 plane);
7728 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7729 	struct dc *dc = adev->dm.dc;
7730 	struct dm_plane_state *dm_plane_state;
7731 	struct dc_scaling_info scaling_info;
7732 	struct drm_crtc_state *new_crtc_state;
7733 	int ret;
7734 
7735 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7736 
7737 	dm_plane_state = to_dm_plane_state(new_plane_state);
7738 
7739 	if (!dm_plane_state->dc_state)
7740 		return 0;
7741 
7742 	new_crtc_state =
7743 		drm_atomic_get_new_crtc_state(state,
7744 					      new_plane_state->crtc);
7745 	if (!new_crtc_state)
7746 		return -EINVAL;
7747 
7748 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7749 	if (ret)
7750 		return ret;
7751 
7752 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7753 	if (ret)
7754 		return ret;
7755 
7756 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7757 		return 0;
7758 
7759 	return -EINVAL;
7760 }
7761 
7762 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7763 				       struct drm_atomic_state *state)
7764 {
7765 	/* Only support async updates on cursor planes. */
7766 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7767 		return -EINVAL;
7768 
7769 	return 0;
7770 }
7771 
7772 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7773 					 struct drm_atomic_state *state)
7774 {
7775 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7776 									   plane);
7777 	struct drm_plane_state *old_state =
7778 		drm_atomic_get_old_plane_state(state, plane);
7779 
7780 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7781 
7782 	swap(plane->state->fb, new_state->fb);
7783 
7784 	plane->state->src_x = new_state->src_x;
7785 	plane->state->src_y = new_state->src_y;
7786 	plane->state->src_w = new_state->src_w;
7787 	plane->state->src_h = new_state->src_h;
7788 	plane->state->crtc_x = new_state->crtc_x;
7789 	plane->state->crtc_y = new_state->crtc_y;
7790 	plane->state->crtc_w = new_state->crtc_w;
7791 	plane->state->crtc_h = new_state->crtc_h;
7792 
7793 	handle_cursor_update(plane, old_state);
7794 }
7795 
7796 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7797 	.prepare_fb = dm_plane_helper_prepare_fb,
7798 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7799 	.atomic_check = dm_plane_atomic_check,
7800 	.atomic_async_check = dm_plane_atomic_async_check,
7801 	.atomic_async_update = dm_plane_atomic_async_update
7802 };
7803 
7804 /*
7805  * TODO: these are currently initialized to rgb formats only.
7806  * For future use cases we should either initialize them dynamically based on
7807  * plane capabilities, or initialize this array to all formats, so internal drm
7808  * check will succeed, and let DC implement proper check
7809  */
7810 static const uint32_t rgb_formats[] = {
7811 	DRM_FORMAT_XRGB8888,
7812 	DRM_FORMAT_ARGB8888,
7813 	DRM_FORMAT_RGBA8888,
7814 	DRM_FORMAT_XRGB2101010,
7815 	DRM_FORMAT_XBGR2101010,
7816 	DRM_FORMAT_ARGB2101010,
7817 	DRM_FORMAT_ABGR2101010,
7818 	DRM_FORMAT_XRGB16161616,
7819 	DRM_FORMAT_XBGR16161616,
7820 	DRM_FORMAT_ARGB16161616,
7821 	DRM_FORMAT_ABGR16161616,
7822 	DRM_FORMAT_XBGR8888,
7823 	DRM_FORMAT_ABGR8888,
7824 	DRM_FORMAT_RGB565,
7825 };
7826 
7827 static const uint32_t overlay_formats[] = {
7828 	DRM_FORMAT_XRGB8888,
7829 	DRM_FORMAT_ARGB8888,
7830 	DRM_FORMAT_RGBA8888,
7831 	DRM_FORMAT_XBGR8888,
7832 	DRM_FORMAT_ABGR8888,
7833 	DRM_FORMAT_RGB565
7834 };
7835 
7836 static const u32 cursor_formats[] = {
7837 	DRM_FORMAT_ARGB8888
7838 };
7839 
7840 static int get_plane_formats(const struct drm_plane *plane,
7841 			     const struct dc_plane_cap *plane_cap,
7842 			     uint32_t *formats, int max_formats)
7843 {
7844 	int i, num_formats = 0;
7845 
7846 	/*
7847 	 * TODO: Query support for each group of formats directly from
7848 	 * DC plane caps. This will require adding more formats to the
7849 	 * caps list.
7850 	 */
7851 
7852 	switch (plane->type) {
7853 	case DRM_PLANE_TYPE_PRIMARY:
7854 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7855 			if (num_formats >= max_formats)
7856 				break;
7857 
7858 			formats[num_formats++] = rgb_formats[i];
7859 		}
7860 
7861 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7862 			formats[num_formats++] = DRM_FORMAT_NV12;
7863 		if (plane_cap && plane_cap->pixel_format_support.p010)
7864 			formats[num_formats++] = DRM_FORMAT_P010;
7865 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7866 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7867 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7868 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7869 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7870 		}
7871 		break;
7872 
7873 	case DRM_PLANE_TYPE_OVERLAY:
7874 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7875 			if (num_formats >= max_formats)
7876 				break;
7877 
7878 			formats[num_formats++] = overlay_formats[i];
7879 		}
7880 		break;
7881 
7882 	case DRM_PLANE_TYPE_CURSOR:
7883 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7884 			if (num_formats >= max_formats)
7885 				break;
7886 
7887 			formats[num_formats++] = cursor_formats[i];
7888 		}
7889 		break;
7890 	}
7891 
7892 	return num_formats;
7893 }
7894 
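/*
 * Register a DRM plane: build the format list from the DC plane caps, fetch
 * the supported modifiers, then attach the blending, YUV color-encoding and
 * rotation properties the plane type and hardware generation allow.
 */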
7895 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7896 				struct drm_plane *plane,
7897 				unsigned long possible_crtcs,
7898 				const struct dc_plane_cap *plane_cap)
7899 {
7900 	uint32_t formats[32];
7901 	int num_formats;
7902 	int res = -EPERM;
7903 	unsigned int supported_rotations;
7904 	uint64_t *modifiers = NULL;
7905 
7906 	num_formats = get_plane_formats(plane, plane_cap, formats,
7907 					ARRAY_SIZE(formats));
7908 
7909 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7910 	if (res)
7911 		return res;
7912 
7913 	if (modifiers == NULL)
7914 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7915 
7916 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7917 				       &dm_plane_funcs, formats, num_formats,
7918 				       modifiers, plane->type, NULL);
7919 	kfree(modifiers);
7920 	if (res)
7921 		return res;
7922 
7923 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7924 	    plane_cap && plane_cap->per_pixel_alpha) {
7925 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7926 					  BIT(DRM_MODE_BLEND_PREMULTI) |
7927 					  BIT(DRM_MODE_BLEND_COVERAGE);
7928 
7929 		drm_plane_create_alpha_property(plane);
7930 		drm_plane_create_blend_mode_property(plane, blend_caps);
7931 	}
7932 
7933 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7934 	    plane_cap &&
7935 	    (plane_cap->pixel_format_support.nv12 ||
7936 	     plane_cap->pixel_format_support.p010)) {
7937 		/* This only affects YUV formats. */
7938 		drm_plane_create_color_properties(
7939 			plane,
7940 			BIT(DRM_COLOR_YCBCR_BT601) |
7941 			BIT(DRM_COLOR_YCBCR_BT709) |
7942 			BIT(DRM_COLOR_YCBCR_BT2020),
7943 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7944 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7945 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7946 	}
7947 
7948 	supported_rotations =
7949 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7950 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7951 
7952 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7953 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7954 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7955 						   supported_rotations);
7956 
7957 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7958 
7959 	/* Create (reset) the plane state */
7960 	if (plane->funcs->reset)
7961 		plane->funcs->reset(plane);
7962 
7963 	return 0;
7964 }
7965 
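/*
 * Create one amdgpu_crtc backed by the given primary plane plus a dedicated
 * cursor plane, hook up the DM CRTC helper funcs and enable non-legacy color
 * management on it.
 */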
7966 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7967 			       struct drm_plane *plane,
7968 			       uint32_t crtc_index)
7969 {
7970 	struct amdgpu_crtc *acrtc = NULL;
7971 	struct drm_plane *cursor_plane;
7972 
7973 	int res = -ENOMEM;
7974 
7975 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7976 	if (!cursor_plane)
7977 		goto fail;
7978 
7979 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7980 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7981 
7982 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7983 	if (!acrtc)
7984 		goto fail;
7985 
7986 	res = drm_crtc_init_with_planes(
7987 			dm->ddev,
7988 			&acrtc->base,
7989 			plane,
7990 			cursor_plane,
7991 			&amdgpu_dm_crtc_funcs, NULL);
7992 
7993 	if (res)
7994 		goto fail;
7995 
7996 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7997 
7998 	/* Create (reset) the plane state */
7999 	if (acrtc->base.funcs->reset)
8000 		acrtc->base.funcs->reset(&acrtc->base);
8001 
8002 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8003 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8004 
8005 	acrtc->crtc_id = crtc_index;
8006 	acrtc->base.enabled = false;
8007 	acrtc->otg_inst = -1;
8008 
8009 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8010 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8011 				   true, MAX_COLOR_LUT_ENTRIES);
8012 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8013 
8014 	return 0;
8015 
8016 fail:
8017 	kfree(acrtc);
8018 	kfree(cursor_plane);
8019 	return res;
8020 }
8021 
8022 
8023 static int to_drm_connector_type(enum signal_type st)
8024 {
8025 	switch (st) {
8026 	case SIGNAL_TYPE_HDMI_TYPE_A:
8027 		return DRM_MODE_CONNECTOR_HDMIA;
8028 	case SIGNAL_TYPE_EDP:
8029 		return DRM_MODE_CONNECTOR_eDP;
8030 	case SIGNAL_TYPE_LVDS:
8031 		return DRM_MODE_CONNECTOR_LVDS;
8032 	case SIGNAL_TYPE_RGB:
8033 		return DRM_MODE_CONNECTOR_VGA;
8034 	case SIGNAL_TYPE_DISPLAY_PORT:
8035 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8036 		return DRM_MODE_CONNECTOR_DisplayPort;
8037 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8038 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8039 		return DRM_MODE_CONNECTOR_DVID;
8040 	case SIGNAL_TYPE_VIRTUAL:
8041 		return DRM_MODE_CONNECTOR_VIRTUAL;
8042 
8043 	default:
8044 		return DRM_MODE_CONNECTOR_Unknown;
8045 	}
8046 }
8047 
8048 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8049 {
8050 	struct drm_encoder *encoder;
8051 
8052 	/* There is only one encoder per connector */
8053 	drm_connector_for_each_possible_encoder(connector, encoder)
8054 		return encoder;
8055 
8056 	return NULL;
8057 }
8058 
8059 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8060 {
8061 	struct drm_encoder *encoder;
8062 	struct amdgpu_encoder *amdgpu_encoder;
8063 
8064 	encoder = amdgpu_dm_connector_to_encoder(connector);
8065 
8066 	if (encoder == NULL)
8067 		return;
8068 
8069 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8070 
8071 	amdgpu_encoder->native_mode.clock = 0;
8072 
8073 	if (!list_empty(&connector->probed_modes)) {
8074 		struct drm_display_mode *preferred_mode = NULL;
8075 
8076 		list_for_each_entry(preferred_mode,
8077 				    &connector->probed_modes,
8078 				    head) {
8079 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8080 				amdgpu_encoder->native_mode = *preferred_mode;
8081 
8082 			break;
8083 		}
8084 
8085 	}
8086 }
8087 
8088 static struct drm_display_mode *
8089 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8090 			     char *name,
8091 			     int hdisplay, int vdisplay)
8092 {
8093 	struct drm_device *dev = encoder->dev;
8094 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8095 	struct drm_display_mode *mode = NULL;
8096 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8097 
8098 	mode = drm_mode_duplicate(dev, native_mode);
8099 
8100 	if (mode == NULL)
8101 		return NULL;
8102 
8103 	mode->hdisplay = hdisplay;
8104 	mode->vdisplay = vdisplay;
8105 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8106 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8107 
8108 	return mode;
8109 
8110 }
8111 
8112 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8113 						 struct drm_connector *connector)
8114 {
8115 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8116 	struct drm_display_mode *mode = NULL;
8117 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8118 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8119 				to_amdgpu_dm_connector(connector);
8120 	int i;
8121 	int n;
8122 	struct mode_size {
8123 		char name[DRM_DISPLAY_MODE_LEN];
8124 		int w;
8125 		int h;
8126 	} common_modes[] = {
8127 		{  "640x480",  640,  480},
8128 		{  "800x600",  800,  600},
8129 		{ "1024x768", 1024,  768},
8130 		{ "1280x720", 1280,  720},
8131 		{ "1280x800", 1280,  800},
8132 		{"1280x1024", 1280, 1024},
8133 		{ "1440x900", 1440,  900},
8134 		{"1680x1050", 1680, 1050},
8135 		{"1600x1200", 1600, 1200},
8136 		{"1920x1080", 1920, 1080},
8137 		{"1920x1200", 1920, 1200}
8138 	};
8139 
8140 	n = ARRAY_SIZE(common_modes);
8141 
8142 	for (i = 0; i < n; i++) {
8143 		struct drm_display_mode *curmode = NULL;
8144 		bool mode_existed = false;
8145 
8146 		if (common_modes[i].w > native_mode->hdisplay ||
8147 		    common_modes[i].h > native_mode->vdisplay ||
8148 		   (common_modes[i].w == native_mode->hdisplay &&
8149 		    common_modes[i].h == native_mode->vdisplay))
8150 			continue;
8151 
8152 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8153 			if (common_modes[i].w == curmode->hdisplay &&
8154 			    common_modes[i].h == curmode->vdisplay) {
8155 				mode_existed = true;
8156 				break;
8157 			}
8158 		}
8159 
8160 		if (mode_existed)
8161 			continue;
8162 
8163 		mode = amdgpu_dm_create_common_mode(encoder,
8164 				common_modes[i].name, common_modes[i].w,
8165 				common_modes[i].h);
8166 		if (!mode)
8167 			continue;
8168 
8169 		drm_mode_probed_add(connector, mode);
8170 		amdgpu_dm_connector->num_modes++;
8171 	}
8172 }
8173 
8174 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8175 {
8176 	struct drm_encoder *encoder;
8177 	struct amdgpu_encoder *amdgpu_encoder;
8178 	const struct drm_display_mode *native_mode;
8179 
8180 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8181 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8182 		return;
8183 
8184 	encoder = amdgpu_dm_connector_to_encoder(connector);
8185 	if (!encoder)
8186 		return;
8187 
8188 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8189 
8190 	native_mode = &amdgpu_encoder->native_mode;
8191 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8192 		return;
8193 
8194 	drm_connector_set_panel_orientation_with_quirk(connector,
8195 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8196 						       native_mode->hdisplay,
8197 						       native_mode->vdisplay);
8198 }
8199 
8200 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8201 					      struct edid *edid)
8202 {
8203 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8204 			to_amdgpu_dm_connector(connector);
8205 
8206 	if (edid) {
8207 		/* empty probed_modes */
8208 		INIT_LIST_HEAD(&connector->probed_modes);
8209 		amdgpu_dm_connector->num_modes =
8210 				drm_add_edid_modes(connector, edid);
8211 
8212 		/* Sort the probed modes before calling
8213 		 * amdgpu_dm_get_native_mode(), since an EDID can have
8214 		 * more than one preferred mode. Modes later in the
8215 		 * probed mode list could be of a higher, preferred
8216 		 * resolution. For example, 3840x2160 in the base EDID
8217 		 * preferred timing and 4096x2160 in a DID extension
8218 		 * block later.
8219 		 */
8220 		drm_mode_sort(&connector->probed_modes);
8221 		amdgpu_dm_get_native_mode(connector);
8222 
8223 		/* Freesync capabilities are reset by calling
8224 		 * drm_add_edid_modes() and need to be
8225 		 * restored here.
8226 		 */
8227 		amdgpu_dm_update_freesync_caps(connector, edid);
8228 
8229 		amdgpu_set_panel_orientation(connector);
8230 	} else {
8231 		amdgpu_dm_connector->num_modes = 0;
8232 	}
8233 }
8234 
8235 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8236 			      struct drm_display_mode *mode)
8237 {
8238 	struct drm_display_mode *m;
8239 
8240 	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8241 		if (drm_mode_equal(m, mode))
8242 			return true;
8243 	}
8244 
8245 	return false;
8246 }
8247 
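/*
 * Synthesize extra modes for freesync-capable displays by stretching the
 * vertical blanking of the highest-refresh mode at the preferred resolution
 * so it refreshes at the common film/TV rates listed below. For example
 * (illustrative 1080p timing: clock 148500 kHz, htotal 2200, vtotal 1125),
 * a 48 Hz target gives
 * target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406, i.e. the
 * vertical blank grows by 281 lines.
 */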
8248 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8249 {
8250 	const struct drm_display_mode *m;
8251 	struct drm_display_mode *new_mode;
8252 	uint i;
8253 	uint32_t new_modes_count = 0;
8254 
8255 	/* Standard FPS values
8256 	 *
8257 	 * 23.976       - TV/NTSC
8258 	 * 24 	        - Cinema
8259 	 * 25 	        - TV/PAL
8260 	 * 29.97        - TV/NTSC
8261 	 * 30 	        - TV/NTSC
8262 	 * 48 	        - Cinema HFR
8263 	 * 50 	        - TV/PAL
8264 	 * 60 	        - Commonly used
8265 	 * 48,72,96,120 - Multiples of 24
8266 	 */
8267 	static const uint32_t common_rates[] = {
8268 		23976, 24000, 25000, 29970, 30000,
8269 		48000, 50000, 60000, 72000, 96000, 120000
8270 	};
8271 
8272 	/*
8273 	 * Find mode with highest refresh rate with the same resolution
8274 	 * as the preferred mode. Some monitors report a preferred mode
8275 	 * with lower resolution than the highest refresh rate supported.
8276 	 */
8277 
8278 	m = get_highest_refresh_rate_mode(aconnector, true);
8279 	if (!m)
8280 		return 0;
8281 
8282 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8283 		uint64_t target_vtotal, target_vtotal_diff;
8284 		uint64_t num, den;
8285 
8286 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8287 			continue;
8288 
8289 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8290 		    common_rates[i] > aconnector->max_vfreq * 1000)
8291 			continue;
8292 
8293 		num = (unsigned long long)m->clock * 1000 * 1000;
8294 		den = common_rates[i] * (unsigned long long)m->htotal;
8295 		target_vtotal = div_u64(num, den);
8296 		target_vtotal_diff = target_vtotal - m->vtotal;
8297 
8298 		/* Check for illegal modes */
8299 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8300 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8301 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8302 			continue;
8303 
8304 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8305 		if (!new_mode)
8306 			goto out;
8307 
8308 		new_mode->vtotal += (u16)target_vtotal_diff;
8309 		new_mode->vsync_start += (u16)target_vtotal_diff;
8310 		new_mode->vsync_end += (u16)target_vtotal_diff;
8311 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8312 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8313 
8314 		if (!is_duplicate_mode(aconnector, new_mode)) {
8315 			drm_mode_probed_add(&aconnector->base, new_mode);
8316 			new_modes_count += 1;
8317 		} else
8318 			drm_mode_destroy(aconnector->base.dev, new_mode);
8319 	}
8320  out:
8321 	return new_modes_count;
8322 }
8323 
8324 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8325 						   struct edid *edid)
8326 {
8327 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8328 		to_amdgpu_dm_connector(connector);
8329 
8330 	if (!edid)
8331 		return;
8332 
8333 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8334 		amdgpu_dm_connector->num_modes +=
8335 			add_fs_modes(amdgpu_dm_connector);
8336 }
8337 
8338 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8339 {
8340 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8341 			to_amdgpu_dm_connector(connector);
8342 	struct drm_encoder *encoder;
8343 	struct edid *edid = amdgpu_dm_connector->edid;
8344 
8345 	encoder = amdgpu_dm_connector_to_encoder(connector);
8346 
8347 	if (!drm_edid_is_valid(edid)) {
8348 		amdgpu_dm_connector->num_modes =
8349 				drm_add_modes_noedid(connector, 640, 480);
8350 	} else {
8351 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8352 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8353 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8354 	}
8355 	amdgpu_dm_fbc_init(connector);
8356 
8357 	return amdgpu_dm_connector->num_modes;
8358 }
8359 
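/*
 * Shared connector initialization: reset the connector state, set the
 * per-connector-type polling and YCbCr 4:2:0 capabilities, and attach the DM
 * properties (scaling, underscan, max bpc, ABM, HDR metadata, VRR, HDCP).
 */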
8360 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8361 				     struct amdgpu_dm_connector *aconnector,
8362 				     int connector_type,
8363 				     struct dc_link *link,
8364 				     int link_index)
8365 {
8366 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8367 
8368 	/*
8369 	 * Some of the properties below require access to state, like bpc.
8370 	 * Allocate some default initial connector state with our reset helper.
8371 	 */
8372 	if (aconnector->base.funcs->reset)
8373 		aconnector->base.funcs->reset(&aconnector->base);
8374 
8375 	aconnector->connector_id = link_index;
8376 	aconnector->dc_link = link;
8377 	aconnector->base.interlace_allowed = false;
8378 	aconnector->base.doublescan_allowed = false;
8379 	aconnector->base.stereo_allowed = false;
8380 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8381 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8382 	aconnector->audio_inst = -1;
8383 	mutex_init(&aconnector->hpd_lock);
8384 
8385 	/*
8386 	 * Configure HPD hot plug support. The default value of connector->polled
8387 	 * is 0, which means HPD hot plug is not supported.
8388 	 */
8389 	switch (connector_type) {
8390 	case DRM_MODE_CONNECTOR_HDMIA:
8391 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8392 		aconnector->base.ycbcr_420_allowed =
8393 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8394 		break;
8395 	case DRM_MODE_CONNECTOR_DisplayPort:
8396 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8397 		link->link_enc = link_enc_cfg_get_link_enc(link);
8398 		ASSERT(link->link_enc);
8399 		if (link->link_enc)
8400 			aconnector->base.ycbcr_420_allowed =
8401 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8402 		break;
8403 	case DRM_MODE_CONNECTOR_DVID:
8404 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8405 		break;
8406 	default:
8407 		break;
8408 	}
8409 
8410 	drm_object_attach_property(&aconnector->base.base,
8411 				dm->ddev->mode_config.scaling_mode_property,
8412 				DRM_MODE_SCALE_NONE);
8413 
8414 	drm_object_attach_property(&aconnector->base.base,
8415 				adev->mode_info.underscan_property,
8416 				UNDERSCAN_OFF);
8417 	drm_object_attach_property(&aconnector->base.base,
8418 				adev->mode_info.underscan_hborder_property,
8419 				0);
8420 	drm_object_attach_property(&aconnector->base.base,
8421 				adev->mode_info.underscan_vborder_property,
8422 				0);
8423 
8424 	if (!aconnector->mst_port)
8425 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8426 
8427 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8428 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8429 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8430 
8431 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8432 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8433 		drm_object_attach_property(&aconnector->base.base,
8434 				adev->mode_info.abm_level_property, 0);
8435 	}
8436 
8437 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8438 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8439 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8440 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8441 
8442 		if (!aconnector->mst_port)
8443 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8444 
8445 #ifdef CONFIG_DRM_AMD_DC_HDCP
8446 		if (adev->dm.hdcp_workqueue)
8447 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8448 #endif
8449 	}
8450 }
8451 
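/*
 * I2C transfer callback: repackage the i2c_msg array as a DC i2c_command and
 * submit it over the link's DDC channel. Returns the number of messages on
 * success, -EIO otherwise.
 */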
8452 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8453 			      struct i2c_msg *msgs, int num)
8454 {
8455 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8456 	struct ddc_service *ddc_service = i2c->ddc_service;
8457 	struct i2c_command cmd;
8458 	int i;
8459 	int result = -EIO;
8460 
8461 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8462 
8463 	if (!cmd.payloads)
8464 		return result;
8465 
8466 	cmd.number_of_payloads = num;
8467 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8468 	cmd.speed = 100;
8469 
8470 	for (i = 0; i < num; i++) {
8471 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8472 		cmd.payloads[i].address = msgs[i].addr;
8473 		cmd.payloads[i].length = msgs[i].len;
8474 		cmd.payloads[i].data = msgs[i].buf;
8475 	}
8476 
8477 	if (dc_submit_i2c(
8478 			ddc_service->ctx->dc,
8479 			ddc_service->ddc_pin->hw_info.ddc_channel,
8480 			&cmd))
8481 		result = num;
8482 
8483 	kfree(cmd.payloads);
8484 	return result;
8485 }
8486 
8487 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8488 {
8489 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8490 }
8491 
8492 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8493 	.master_xfer = amdgpu_dm_i2c_xfer,
8494 	.functionality = amdgpu_dm_i2c_func,
8495 };
8496 
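/*
 * Allocate an I2C adapter backed by the given DC DDC service. The caller is
 * still responsible for registering it with i2c_add_adapter(); for links with
 * a DDC pin the hardware channel is set to the link index.
 */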
8497 static struct amdgpu_i2c_adapter *
8498 create_i2c(struct ddc_service *ddc_service,
8499 	   int link_index,
8500 	   int *res)
8501 {
8502 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8503 	struct amdgpu_i2c_adapter *i2c;
8504 
8505 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8506 	if (!i2c)
8507 		return NULL;
8508 	i2c->base.owner = THIS_MODULE;
8509 	i2c->base.class = I2C_CLASS_DDC;
8510 	i2c->base.dev.parent = &adev->pdev->dev;
8511 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8512 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8513 	i2c_set_adapdata(&i2c->base, i2c);
8514 	i2c->ddc_service = ddc_service;
8515 	if (i2c->ddc_service->ddc_pin)
8516 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8517 
8518 	return i2c;
8519 }
8520 
8521 
8522 /*
8523  * Note: this function assumes that dc_link_detect() was called for the
8524  * dc_link which will be represented by this aconnector.
8525  */
8526 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8527 				    struct amdgpu_dm_connector *aconnector,
8528 				    uint32_t link_index,
8529 				    struct amdgpu_encoder *aencoder)
8530 {
8531 	int res = 0;
8532 	int connector_type;
8533 	struct dc *dc = dm->dc;
8534 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8535 	struct amdgpu_i2c_adapter *i2c;
8536 
8537 	link->priv = aconnector;
8538 
8539 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8540 
8541 	i2c = create_i2c(link->ddc, link->link_index, &res);
8542 	if (!i2c) {
8543 		DRM_ERROR("Failed to create i2c adapter data\n");
8544 		return -ENOMEM;
8545 	}
8546 
8547 	aconnector->i2c = i2c;
8548 	res = i2c_add_adapter(&i2c->base);
8549 
8550 	if (res) {
8551 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8552 		goto out_free;
8553 	}
8554 
8555 	connector_type = to_drm_connector_type(link->connector_signal);
8556 
8557 	res = drm_connector_init_with_ddc(
8558 			dm->ddev,
8559 			&aconnector->base,
8560 			&amdgpu_dm_connector_funcs,
8561 			connector_type,
8562 			&i2c->base);
8563 
8564 	if (res) {
8565 		DRM_ERROR("connector_init failed\n");
8566 		aconnector->connector_id = -1;
8567 		goto out_free;
8568 	}
8569 
8570 	drm_connector_helper_add(
8571 			&aconnector->base,
8572 			&amdgpu_dm_connector_helper_funcs);
8573 
8574 	amdgpu_dm_connector_init_helper(
8575 		dm,
8576 		aconnector,
8577 		connector_type,
8578 		link,
8579 		link_index);
8580 
8581 	drm_connector_attach_encoder(
8582 		&aconnector->base, &aencoder->base);
8583 
8584 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8585 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8586 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8587 
8588 out_free:
8589 	if (res) {
8590 		kfree(i2c);
8591 		aconnector->i2c = NULL;
8592 	}
8593 	return res;
8594 }
8595 
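/* Return a bitmask with one bit set for each CRTC an encoder may drive. */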
8596 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8597 {
8598 	switch (adev->mode_info.num_crtc) {
8599 	case 1:
8600 		return 0x1;
8601 	case 2:
8602 		return 0x3;
8603 	case 3:
8604 		return 0x7;
8605 	case 4:
8606 		return 0xf;
8607 	case 5:
8608 		return 0x1f;
8609 	case 6:
8610 	default:
8611 		return 0x3f;
8612 	}
8613 }
8614 
8615 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8616 				  struct amdgpu_encoder *aencoder,
8617 				  uint32_t link_index)
8618 {
8619 	struct amdgpu_device *adev = drm_to_adev(dev);
8620 
8621 	int res = drm_encoder_init(dev,
8622 				   &aencoder->base,
8623 				   &amdgpu_dm_encoder_funcs,
8624 				   DRM_MODE_ENCODER_TMDS,
8625 				   NULL);
8626 
8627 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8628 
8629 	if (!res)
8630 		aencoder->encoder_id = link_index;
8631 	else
8632 		aencoder->encoder_id = -1;
8633 
8634 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8635 
8636 	return res;
8637 }
8638 
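/*
 * Enable or disable the per-CRTC interrupts DM depends on: vblank, pageflip
 * and (when secure display is built in) vline0.
 */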
8639 static void manage_dm_interrupts(struct amdgpu_device *adev,
8640 				 struct amdgpu_crtc *acrtc,
8641 				 bool enable)
8642 {
8643 	/*
8644 	 * We have no guarantee that the frontend index maps to the same
8645 	 * backend index - some even map to more than one.
8646 	 *
8647 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8648 	 */
8649 	int irq_type =
8650 		amdgpu_display_crtc_idx_to_irq_type(
8651 			adev,
8652 			acrtc->crtc_id);
8653 
8654 	if (enable) {
8655 		drm_crtc_vblank_on(&acrtc->base);
8656 		amdgpu_irq_get(
8657 			adev,
8658 			&adev->pageflip_irq,
8659 			irq_type);
8660 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8661 		amdgpu_irq_get(
8662 			adev,
8663 			&adev->vline0_irq,
8664 			irq_type);
8665 #endif
8666 	} else {
8667 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8668 		amdgpu_irq_put(
8669 			adev,
8670 			&adev->vline0_irq,
8671 			irq_type);
8672 #endif
8673 		amdgpu_irq_put(
8674 			adev,
8675 			&adev->pageflip_irq,
8676 			irq_type);
8677 		drm_crtc_vblank_off(&acrtc->base);
8678 	}
8679 }
8680 
8681 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8682 				      struct amdgpu_crtc *acrtc)
8683 {
8684 	int irq_type =
8685 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8686 
8687 	/*
8688 	 * Read the current state for the IRQ and force a reapply of the
8689 	 * setting to hardware.
8690 	 */
8691 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8692 }
8693 
8694 static bool
8695 is_scaling_state_different(const struct dm_connector_state *dm_state,
8696 			   const struct dm_connector_state *old_dm_state)
8697 {
8698 	if (dm_state->scaling != old_dm_state->scaling)
8699 		return true;
8700 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8701 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8702 			return true;
8703 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8704 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8705 			return true;
8706 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8707 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8708 		return true;
8709 	return false;
8710 }
8711 
8712 #ifdef CONFIG_DRM_AMD_DC_HDCP
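/*
 * Decide whether HDCP needs to be (re)enabled for this connector based on the
 * old and new content protection properties and the CRTC state; may adjust
 * the requested content_protection value as a side effect.
 */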
8713 static bool is_content_protection_different(struct drm_connector_state *state,
8714 					    const struct drm_connector_state *old_state,
8715 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8716 {
8717 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8718 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8719 
8720 	/* Handle: Type0/1 change */
8721 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8722 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8723 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8724 		return true;
8725 	}
8726 
8727 	/* CP is being re-enabled, ignore this
8728 	 *
8729 	 * Handles:	ENABLED -> DESIRED
8730 	 */
8731 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8732 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8733 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8734 		return false;
8735 	}
8736 
8737 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8738 	 *
8739 	 * Handles:	UNDESIRED -> ENABLED
8740 	 */
8741 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8742 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8743 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8744 
8745 	/* Stream removed and re-enabled
8746 	 *
8747 	 * Can sometimes overlap with the HPD case,
8748 	 * thus set update_hdcp to false to avoid
8749 	 * setting HDCP multiple times.
8750 	 *
8751 	 * Handles:	DESIRED -> DESIRED (Special case)
8752 	 */
8753 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8754 		state->crtc && state->crtc->enabled &&
8755 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8756 		dm_con_state->update_hdcp = false;
8757 		return true;
8758 	}
8759 
8760 	/* Hot-plug, headless s3, dpms
8761 	 *
8762 	 * Only start HDCP if the display is connected/enabled.
8763 	 * update_hdcp flag will be set to false until the next
8764 	 * HPD comes in.
8765 	 *
8766 	 * Handles:	DESIRED -> DESIRED (Special case)
8767 	 */
8768 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8769 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8770 		dm_con_state->update_hdcp = false;
8771 		return true;
8772 	}
8773 
8774 	/*
8775 	 * Handles:	UNDESIRED -> UNDESIRED
8776 	 *		DESIRED -> DESIRED
8777 	 *		ENABLED -> ENABLED
8778 	 */
8779 	if (old_state->content_protection == state->content_protection)
8780 		return false;
8781 
8782 	/*
8783 	 * Handles:	UNDESIRED -> DESIRED
8784 	 *		DESIRED -> UNDESIRED
8785 	 *		ENABLED -> UNDESIRED
8786 	 */
8787 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8788 		return true;
8789 
8790 	/*
8791 	 * Handles:	DESIRED -> ENABLED
8792 	 */
8793 	return false;
8794 }
8795 
8796 #endif
8797 static void remove_stream(struct amdgpu_device *adev,
8798 			  struct amdgpu_crtc *acrtc,
8799 			  struct dc_stream_state *stream)
8800 {
8801 	/* this is the update mode case */
8802 
8803 	acrtc->otg_inst = -1;
8804 	acrtc->enabled = false;
8805 }
8806 
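/*
 * Translate the cursor plane state into a DC cursor position. Negative
 * on-screen coordinates are clamped to zero and folded into the hotspot so
 * the cursor can move past the top/left edge; oversized cursors are rejected.
 */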
8807 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8808 			       struct dc_cursor_position *position)
8809 {
8810 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8811 	int x, y;
8812 	int xorigin = 0, yorigin = 0;
8813 
8814 	if (!crtc || !plane->state->fb)
8815 		return 0;
8816 
8817 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8818 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8819 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8820 			  __func__,
8821 			  plane->state->crtc_w,
8822 			  plane->state->crtc_h);
8823 		return -EINVAL;
8824 	}
8825 
8826 	x = plane->state->crtc_x;
8827 	y = plane->state->crtc_y;
8828 
8829 	if (x <= -amdgpu_crtc->max_cursor_width ||
8830 	    y <= -amdgpu_crtc->max_cursor_height)
8831 		return 0;
8832 
8833 	if (x < 0) {
8834 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8835 		x = 0;
8836 	}
8837 	if (y < 0) {
8838 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8839 		y = 0;
8840 	}
8841 	position->enable = true;
8842 	position->translate_by_source = true;
8843 	position->x = x;
8844 	position->y = y;
8845 	position->x_hotspot = xorigin;
8846 	position->y_hotspot = yorigin;
8847 
8848 	return 0;
8849 }
8850 
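/*
 * Program the cursor for a cursor plane update: push the new attributes and
 * position to DC, or disable the cursor when it is no longer visible.
 */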
8851 static void handle_cursor_update(struct drm_plane *plane,
8852 				 struct drm_plane_state *old_plane_state)
8853 {
8854 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8855 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8856 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8857 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8858 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8859 	uint64_t address = afb ? afb->address : 0;
8860 	struct dc_cursor_position position = {0};
8861 	struct dc_cursor_attributes attributes;
8862 	int ret;
8863 
8864 	if (!plane->state->fb && !old_plane_state->fb)
8865 		return;
8866 
8867 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8868 		      __func__,
8869 		      amdgpu_crtc->crtc_id,
8870 		      plane->state->crtc_w,
8871 		      plane->state->crtc_h);
8872 
8873 	ret = get_cursor_position(plane, crtc, &position);
8874 	if (ret)
8875 		return;
8876 
8877 	if (!position.enable) {
8878 		/* turn off cursor */
8879 		if (crtc_state && crtc_state->stream) {
8880 			mutex_lock(&adev->dm.dc_lock);
8881 			dc_stream_set_cursor_position(crtc_state->stream,
8882 						      &position);
8883 			mutex_unlock(&adev->dm.dc_lock);
8884 		}
8885 		return;
8886 	}
8887 
8888 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8889 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8890 
8891 	memset(&attributes, 0, sizeof(attributes));
8892 	attributes.address.high_part = upper_32_bits(address);
8893 	attributes.address.low_part  = lower_32_bits(address);
8894 	attributes.width             = plane->state->crtc_w;
8895 	attributes.height            = plane->state->crtc_h;
8896 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8897 	attributes.rotation_angle    = 0;
8898 	attributes.attribute_flags.value = 0;
8899 
8900 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8901 
8902 	if (crtc_state->stream) {
8903 		mutex_lock(&adev->dm.dc_lock);
8904 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8905 							 &attributes))
8906 			DRM_ERROR("DC failed to set cursor attributes\n");
8907 
8908 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8909 						   &position))
8910 			DRM_ERROR("DC failed to set cursor position\n");
8911 		mutex_unlock(&adev->dm.dc_lock);
8912 	}
8913 }
8914 
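/*
 * Called with the event_lock held: take ownership of the pending pageflip
 * event and mark the flip as submitted so the pageflip IRQ can complete it.
 */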
8915 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8916 {
8917 
8918 	assert_spin_locked(&acrtc->base.dev->event_lock);
8919 	WARN_ON(acrtc->event);
8920 
8921 	acrtc->event = acrtc->base.state->event;
8922 
8923 	/* Set the flip status */
8924 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8925 
8926 	/* Mark this event as consumed */
8927 	acrtc->base.state->event = NULL;
8928 
8929 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8930 		     acrtc->crtc_id);
8931 }
8932 
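/*
 * Recompute the VRR parameters and VRR infopacket for a stream at flip time,
 * under the DRM event_lock, and mirror the results into both the new CRTC
 * state and the IRQ-visible dm_irq_params.
 */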
8933 static void update_freesync_state_on_stream(
8934 	struct amdgpu_display_manager *dm,
8935 	struct dm_crtc_state *new_crtc_state,
8936 	struct dc_stream_state *new_stream,
8937 	struct dc_plane_state *surface,
8938 	u32 flip_timestamp_in_us)
8939 {
8940 	struct mod_vrr_params vrr_params;
8941 	struct dc_info_packet vrr_infopacket = {0};
8942 	struct amdgpu_device *adev = dm->adev;
8943 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8944 	unsigned long flags;
8945 	bool pack_sdp_v1_3 = false;
8946 
8947 	if (!new_stream)
8948 		return;
8949 
8950 	/*
8951 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8952 	 * For now it's sufficient to just guard against these conditions.
8953 	 */
8954 
8955 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8956 		return;
8957 
8958 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8959 	vrr_params = acrtc->dm_irq_params.vrr_params;
8960 
8961 	if (surface) {
8962 		mod_freesync_handle_preflip(
8963 			dm->freesync_module,
8964 			surface,
8965 			new_stream,
8966 			flip_timestamp_in_us,
8967 			&vrr_params);
8968 
8969 		if (adev->family < AMDGPU_FAMILY_AI &&
8970 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8971 			mod_freesync_handle_v_update(dm->freesync_module,
8972 						     new_stream, &vrr_params);
8973 
8974 			/* Need to call this before the frame ends. */
8975 			dc_stream_adjust_vmin_vmax(dm->dc,
8976 						   new_crtc_state->stream,
8977 						   &vrr_params.adjust);
8978 		}
8979 	}
8980 
8981 	mod_freesync_build_vrr_infopacket(
8982 		dm->freesync_module,
8983 		new_stream,
8984 		&vrr_params,
8985 		PACKET_TYPE_VRR,
8986 		TRANSFER_FUNC_UNKNOWN,
8987 		&vrr_infopacket,
8988 		pack_sdp_v1_3);
8989 
8990 	new_crtc_state->freesync_timing_changed |=
8991 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8992 			&vrr_params.adjust,
8993 			sizeof(vrr_params.adjust)) != 0);
8994 
8995 	new_crtc_state->freesync_vrr_info_changed |=
8996 		(memcmp(&new_crtc_state->vrr_infopacket,
8997 			&vrr_infopacket,
8998 			sizeof(vrr_infopacket)) != 0);
8999 
9000 	acrtc->dm_irq_params.vrr_params = vrr_params;
9001 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9002 
9003 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9004 	new_stream->vrr_infopacket = vrr_infopacket;
9005 
9006 	if (new_crtc_state->freesync_vrr_info_changed)
9007 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9008 			      new_crtc_state->base.crtc->base.id,
9009 			      (int)new_crtc_state->base.vrr_enabled,
9010 			      (int)vrr_params.state);
9011 
9012 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9013 }
9014 
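/*
 * Derive the VRR state (variable, fixed or inactive) from the new CRTC's
 * freesync config and copy the resulting parameters into dm_irq_params so
 * the interrupt handlers see a consistent snapshot.
 */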
9015 static void update_stream_irq_parameters(
9016 	struct amdgpu_display_manager *dm,
9017 	struct dm_crtc_state *new_crtc_state)
9018 {
9019 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9020 	struct mod_vrr_params vrr_params;
9021 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9022 	struct amdgpu_device *adev = dm->adev;
9023 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9024 	unsigned long flags;
9025 
9026 	if (!new_stream)
9027 		return;
9028 
9029 	/*
9030 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9031 	 * For now it's sufficient to just guard against these conditions.
9032 	 */
9033 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9034 		return;
9035 
9036 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9037 	vrr_params = acrtc->dm_irq_params.vrr_params;
9038 
9039 	if (new_crtc_state->vrr_supported &&
9040 	    config.min_refresh_in_uhz &&
9041 	    config.max_refresh_in_uhz) {
9042 		/*
9043 		 * if freesync compatible mode was set, config.state will be set
9044 		 * in atomic check
9045 		 */
9046 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9047 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9048 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9049 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9050 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9051 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9052 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9053 		} else {
9054 			config.state = new_crtc_state->base.vrr_enabled ?
9055 						     VRR_STATE_ACTIVE_VARIABLE :
9056 						     VRR_STATE_INACTIVE;
9057 		}
9058 	} else {
9059 		config.state = VRR_STATE_UNSUPPORTED;
9060 	}
9061 
9062 	mod_freesync_build_vrr_params(dm->freesync_module,
9063 				      new_stream,
9064 				      &config, &vrr_params);
9065 
9066 	new_crtc_state->freesync_timing_changed |=
9067 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9068 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9069 
9070 	new_crtc_state->freesync_config = config;
9071 	/* Copy state for access from DM IRQ handler */
9072 	acrtc->dm_irq_params.freesync_config = config;
9073 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9074 	acrtc->dm_irq_params.vrr_params = vrr_params;
9075 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9076 }
9077 
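/*
 * On a VRR off->on transition take a vblank reference and enable the vupdate
 * interrupt; drop both again on the on->off transition, so vblank/pflip
 * timestamps stay valid while VRR is active.
 */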
9078 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9079 					    struct dm_crtc_state *new_state)
9080 {
9081 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9082 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9083 
9084 	if (!old_vrr_active && new_vrr_active) {
9085 		/* Transition VRR inactive -> active:
9086 		 * While VRR is active, we must not disable vblank irq, as a
9087 		 * reenable after disable would compute bogus vblank/pflip
9088 		 * timestamps if it likely happened inside display front-porch.
9089 		 *
9090 		 * We also need vupdate irq for the actual core vblank handling
9091 		 * at end of vblank.
9092 		 */
9093 		dm_set_vupdate_irq(new_state->base.crtc, true);
9094 		drm_crtc_vblank_get(new_state->base.crtc);
9095 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9096 				 __func__, new_state->base.crtc->base.id);
9097 	} else if (old_vrr_active && !new_vrr_active) {
9098 		/* Transition VRR active -> inactive:
9099 		 * Allow vblank irq disable again for fixed refresh rate.
9100 		 */
9101 		dm_set_vupdate_irq(new_state->base.crtc, false);
9102 		drm_crtc_vblank_put(new_state->base.crtc);
9103 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9104 				 __func__, new_state->base.crtc->base.id);
9105 	}
9106 }
9107 
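/* Re-issue cursor updates for every cursor plane present in the commit. */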
9108 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9109 {
9110 	struct drm_plane *plane;
9111 	struct drm_plane_state *old_plane_state;
9112 	int i;
9113 
9114 	/*
9115 	 * TODO: Make this per-stream so we don't issue redundant updates for
9116 	 * commits with multiple streams.
9117 	 */
9118 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9119 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9120 			handle_cursor_update(plane, old_plane_state);
9121 }
9122 
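/*
 * Build a surface/stream update bundle for all planes on one CRTC and hand it
 * to DC, taking care of flip throttling, PSR interaction and freesync
 * bookkeeping along the way.
 */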
9123 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9124 				    struct dc_state *dc_state,
9125 				    struct drm_device *dev,
9126 				    struct amdgpu_display_manager *dm,
9127 				    struct drm_crtc *pcrtc,
9128 				    bool wait_for_vblank)
9129 {
9130 	uint32_t i;
9131 	uint64_t timestamp_ns;
9132 	struct drm_plane *plane;
9133 	struct drm_plane_state *old_plane_state, *new_plane_state;
9134 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9135 	struct drm_crtc_state *new_pcrtc_state =
9136 			drm_atomic_get_new_crtc_state(state, pcrtc);
9137 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9138 	struct dm_crtc_state *dm_old_crtc_state =
9139 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9140 	int planes_count = 0, vpos, hpos;
9141 	unsigned long flags;
9142 	uint32_t target_vblank, last_flip_vblank;
9143 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9144 	bool pflip_present = false;
9145 	struct {
9146 		struct dc_surface_update surface_updates[MAX_SURFACES];
9147 		struct dc_plane_info plane_infos[MAX_SURFACES];
9148 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9149 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9150 		struct dc_stream_update stream_update;
9151 	} *bundle;
9152 
9153 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9154 
9155 	if (!bundle) {
9156 		dm_error("Failed to allocate update bundle\n");
9157 		goto cleanup;
9158 	}
9159 
9160 	/*
9161 	 * Disable the cursor first if we're disabling all the planes.
9162 	 * It'll remain on the screen after the planes are re-enabled
9163 	 * if we don't.
9164 	 */
9165 	if (acrtc_state->active_planes == 0)
9166 		amdgpu_dm_commit_cursors(state);
9167 
9168 	/* update planes when needed */
9169 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9170 		struct drm_crtc *crtc = new_plane_state->crtc;
9171 		struct drm_crtc_state *new_crtc_state;
9172 		struct drm_framebuffer *fb = new_plane_state->fb;
9173 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9174 		bool plane_needs_flip;
9175 		struct dc_plane_state *dc_plane;
9176 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9177 
9178 		/* Cursor plane is handled after stream updates */
9179 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9180 			continue;
9181 
9182 		if (!fb || !crtc || pcrtc != crtc)
9183 			continue;
9184 
9185 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9186 		if (!new_crtc_state->active)
9187 			continue;
9188 
9189 		dc_plane = dm_new_plane_state->dc_state;
9190 
9191 		bundle->surface_updates[planes_count].surface = dc_plane;
9192 		if (new_pcrtc_state->color_mgmt_changed) {
9193 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9194 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9195 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9196 		}
9197 
9198 		fill_dc_scaling_info(dm->adev, new_plane_state,
9199 				     &bundle->scaling_infos[planes_count]);
9200 
9201 		bundle->surface_updates[planes_count].scaling_info =
9202 			&bundle->scaling_infos[planes_count];
9203 
9204 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9205 
9206 		pflip_present = pflip_present || plane_needs_flip;
9207 
9208 		if (!plane_needs_flip) {
9209 			planes_count += 1;
9210 			continue;
9211 		}
9212 
9213 		fill_dc_plane_info_and_addr(
9214 			dm->adev, new_plane_state,
9215 			afb->tiling_flags,
9216 			&bundle->plane_infos[planes_count],
9217 			&bundle->flip_addrs[planes_count].address,
9218 			afb->tmz_surface, false);
9219 
9220 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9221 				 new_plane_state->plane->index,
9222 				 bundle->plane_infos[planes_count].dcc.enable);
9223 
9224 		bundle->surface_updates[planes_count].plane_info =
9225 			&bundle->plane_infos[planes_count];
9226 
9227 		/*
9228 		 * Only allow immediate flips for fast updates that don't
9229 		 * change FB pitch, DCC state, rotation or mirroring.
9230 		 */
9231 		bundle->flip_addrs[planes_count].flip_immediate =
9232 			crtc->state->async_flip &&
9233 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9234 
9235 		timestamp_ns = ktime_get_ns();
9236 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9237 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9238 		bundle->surface_updates[planes_count].surface = dc_plane;
9239 
9240 		if (!bundle->surface_updates[planes_count].surface) {
9241 			DRM_ERROR("No surface for CRTC: id=%d\n",
9242 					acrtc_attach->crtc_id);
9243 			continue;
9244 		}
9245 
9246 		if (plane == pcrtc->primary)
9247 			update_freesync_state_on_stream(
9248 				dm,
9249 				acrtc_state,
9250 				acrtc_state->stream,
9251 				dc_plane,
9252 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9253 
9254 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9255 				 __func__,
9256 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9257 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9258 
9259 		planes_count += 1;
9260 
9261 	}
9262 
9263 	if (pflip_present) {
9264 		if (!vrr_active) {
9265 			/* Use old throttling in non-vrr fixed refresh rate mode
9266 			 * to keep flip scheduling based on target vblank counts
9267 			 * working in a backwards compatible way, e.g., for
9268 			 * clients using the GLX_OML_sync_control extension or
9269 			 * DRI3/Present extension with defined target_msc.
9270 			 */
9271 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9272 		}
9273 		else {
9274 			/* For variable refresh rate mode only:
9275 			 * Get vblank of last completed flip to avoid > 1 vrr
9276 			 * flips per video frame by use of throttling, but allow
9277 			 * flip programming anywhere in the possibly large
9278 			 * variable vrr vblank interval for fine-grained flip
9279 			 * timing control and more opportunity to avoid stutter
9280 			 * on late submission of flips.
9281 			 */
9282 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9283 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9284 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9285 		}
9286 
9287 		target_vblank = last_flip_vblank + wait_for_vblank;
9288 
9289 		/*
9290 		 * Wait until we're out of the vertical blank period before the one
9291 		 * targeted by the flip
9292 		 */
9293 		while ((acrtc_attach->enabled &&
9294 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9295 							    0, &vpos, &hpos, NULL,
9296 							    NULL, &pcrtc->hwmode)
9297 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9298 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9299 			(int)(target_vblank -
9300 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9301 			usleep_range(1000, 1100);
9302 		}
9303 
9304 		/**
9305 		 * Prepare the flip event for the pageflip interrupt to handle.
9306 		 *
9307 		 * This only works in the case where we've already turned on the
9308 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9309 		 * from 0 -> n planes we have to skip a hardware generated event
9310 		 * and rely on sending it from software.
9311 		 */
9312 		if (acrtc_attach->base.state->event &&
9313 		    acrtc_state->active_planes > 0 &&
9314 		    !acrtc_state->force_dpms_off) {
9315 			drm_crtc_vblank_get(pcrtc);
9316 
9317 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9318 
9319 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9320 			prepare_flip_isr(acrtc_attach);
9321 
9322 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9323 		}
9324 
9325 		if (acrtc_state->stream) {
9326 			if (acrtc_state->freesync_vrr_info_changed)
9327 				bundle->stream_update.vrr_infopacket =
9328 					&acrtc_state->stream->vrr_infopacket;
9329 		}
9330 	}
9331 
9332 	/* Update the planes if changed or disable if we don't have any. */
9333 	if ((planes_count || acrtc_state->active_planes == 0) &&
9334 		acrtc_state->stream) {
9335 		/*
9336 		 * If PSR or idle optimizations are enabled then flush out
9337 		 * any pending work before hardware programming.
9338 		 */
9339 		if (dm->vblank_control_workqueue)
9340 			flush_workqueue(dm->vblank_control_workqueue);
9341 
9342 		bundle->stream_update.stream = acrtc_state->stream;
9343 		if (new_pcrtc_state->mode_changed) {
9344 			bundle->stream_update.src = acrtc_state->stream->src;
9345 			bundle->stream_update.dst = acrtc_state->stream->dst;
9346 		}
9347 
9348 		if (new_pcrtc_state->color_mgmt_changed) {
9349 			/*
9350 			 * TODO: This isn't fully correct since we've actually
9351 			 * already modified the stream in place.
9352 			 */
9353 			bundle->stream_update.gamut_remap =
9354 				&acrtc_state->stream->gamut_remap_matrix;
9355 			bundle->stream_update.output_csc_transform =
9356 				&acrtc_state->stream->csc_color_matrix;
9357 			bundle->stream_update.out_transfer_func =
9358 				acrtc_state->stream->out_transfer_func;
9359 		}
9360 
9361 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9362 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9363 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9364 
9365 		/*
9366 		 * If FreeSync state on the stream has changed then we need to
9367 		 * re-adjust the min/max bounds now that DC doesn't handle this
9368 		 * as part of commit.
9369 		 */
9370 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9371 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9372 			dc_stream_adjust_vmin_vmax(
9373 				dm->dc, acrtc_state->stream,
9374 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9375 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9376 		}
9377 		mutex_lock(&dm->dc_lock);
9378 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9379 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9380 			amdgpu_dm_psr_disable(acrtc_state->stream);
9381 
9382 		dc_commit_updates_for_stream(dm->dc,
9383 						     bundle->surface_updates,
9384 						     planes_count,
9385 						     acrtc_state->stream,
9386 						     &bundle->stream_update,
9387 						     dc_state);
9388 
9389 		/**
9390 		 * Enable or disable the interrupts on the backend.
9391 		 *
9392 		 * Most pipes are put into power gating when unused.
9393 		 *
9394 		 * When power gating is enabled on a pipe we lose the
9395 		 * interrupt enablement state when power gating is disabled.
9396 		 *
9397 		 * So we need to update the IRQ control state in hardware
9398 		 * whenever the pipe turns on (since it could be previously
9399 		 * power gated) or off (since some pipes can't be power gated
9400 		 * on some ASICs).
9401 		 */
9402 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9403 			dm_update_pflip_irq_state(drm_to_adev(dev),
9404 						  acrtc_attach);
9405 
9406 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9407 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9408 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9409 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9410 
9411 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9412 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9413 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9414 			struct amdgpu_dm_connector *aconn =
9415 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9416 
9417 			if (aconn->psr_skip_count > 0)
9418 				aconn->psr_skip_count--;
9419 
9420 			/* Allow PSR when skip count is 0. */
9421 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9422 		} else {
9423 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9424 		}
9425 
9426 		mutex_unlock(&dm->dc_lock);
9427 	}
9428 
9429 	/*
9430 	 * Update cursor state *after* programming all the planes.
9431 	 * This avoids redundant programming in the case where we're going
9432 	 * to be disabling a single plane - those pipes are being disabled.
9433 	 */
9434 	if (acrtc_state->active_planes)
9435 		amdgpu_dm_commit_cursors(state);
9436 
9437 cleanup:
9438 	kfree(bundle);
9439 }
9440 
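/*
 * Notify the audio component of ELD changes: first for connectors that lost
 * their CRTC (removals), then for connectors whose new stream reports a valid
 * audio instance (additions).
 */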
9441 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9442 				   struct drm_atomic_state *state)
9443 {
9444 	struct amdgpu_device *adev = drm_to_adev(dev);
9445 	struct amdgpu_dm_connector *aconnector;
9446 	struct drm_connector *connector;
9447 	struct drm_connector_state *old_con_state, *new_con_state;
9448 	struct drm_crtc_state *new_crtc_state;
9449 	struct dm_crtc_state *new_dm_crtc_state;
9450 	const struct dc_stream_status *status;
9451 	int i, inst;
9452 
9453 	/* Notify device removals. */
9454 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9455 		if (old_con_state->crtc != new_con_state->crtc) {
9456 			/* CRTC changes require notification. */
9457 			goto notify;
9458 		}
9459 
9460 		if (!new_con_state->crtc)
9461 			continue;
9462 
9463 		new_crtc_state = drm_atomic_get_new_crtc_state(
9464 			state, new_con_state->crtc);
9465 
9466 		if (!new_crtc_state)
9467 			continue;
9468 
9469 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9470 			continue;
9471 
9472 	notify:
9473 		aconnector = to_amdgpu_dm_connector(connector);
9474 
9475 		mutex_lock(&adev->dm.audio_lock);
9476 		inst = aconnector->audio_inst;
9477 		aconnector->audio_inst = -1;
9478 		mutex_unlock(&adev->dm.audio_lock);
9479 
9480 		amdgpu_dm_audio_eld_notify(adev, inst);
9481 	}
9482 
9483 	/* Notify audio device additions. */
9484 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9485 		if (!new_con_state->crtc)
9486 			continue;
9487 
9488 		new_crtc_state = drm_atomic_get_new_crtc_state(
9489 			state, new_con_state->crtc);
9490 
9491 		if (!new_crtc_state)
9492 			continue;
9493 
9494 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9495 			continue;
9496 
9497 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9498 		if (!new_dm_crtc_state->stream)
9499 			continue;
9500 
9501 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9502 		if (!status)
9503 			continue;
9504 
9505 		aconnector = to_amdgpu_dm_connector(connector);
9506 
9507 		mutex_lock(&adev->dm.audio_lock);
9508 		inst = status->audio_inst;
9509 		aconnector->audio_inst = inst;
9510 		mutex_unlock(&adev->dm.audio_lock);
9511 
9512 		amdgpu_dm_audio_eld_notify(adev, inst);
9513 	}
9514 }
9515 
9516 /*
9517  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9518  * @crtc_state: the DRM CRTC state
9519  * @stream_state: the DC stream state.
9520  *
9521  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9522  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9523  */
9524 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9525 						struct dc_stream_state *stream_state)
9526 {
9527 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9528 }
9529 
9530 /**
9531  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9532  * @state: The atomic state to commit
9533  *
9534  * This will tell DC to commit the constructed DC state from atomic_check,
9535  * programming the hardware. Any failures here implies a hardware failure, since
9536  * programming the hardware. Any failure here implies a hardware failure, since
9537  */
9538 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9539 {
9540 	struct drm_device *dev = state->dev;
9541 	struct amdgpu_device *adev = drm_to_adev(dev);
9542 	struct amdgpu_display_manager *dm = &adev->dm;
9543 	struct dm_atomic_state *dm_state;
9544 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9545 	uint32_t i, j;
9546 	struct drm_crtc *crtc;
9547 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9548 	unsigned long flags;
9549 	bool wait_for_vblank = true;
9550 	struct drm_connector *connector;
9551 	struct drm_connector_state *old_con_state, *new_con_state;
9552 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9553 	int crtc_disable_count = 0;
9554 	bool mode_set_reset_required = false;
9555 	int r;
9556 
9557 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9558 
9559 	r = drm_atomic_helper_wait_for_fences(dev, state, false);
9560 	if (unlikely(r))
9561 		DRM_ERROR("Waiting for fences timed out!");
9562 
9563 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9564 
9565 	dm_state = dm_atomic_get_new_state(state);
9566 	if (dm_state && dm_state->context) {
9567 		dc_state = dm_state->context;
9568 	} else {
9569 		/* No state changes, retain current state. */
9570 		dc_state_temp = dc_create_state(dm->dc);
9571 		ASSERT(dc_state_temp);
9572 		dc_state = dc_state_temp;
9573 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9574 	}
9575 
9576 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9577 				       new_crtc_state, i) {
9578 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9579 
9580 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9581 
9582 		if (old_crtc_state->active &&
9583 		    (!new_crtc_state->active ||
9584 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9585 			manage_dm_interrupts(adev, acrtc, false);
9586 			dc_stream_release(dm_old_crtc_state->stream);
9587 		}
9588 	}
9589 
9590 	drm_atomic_helper_calc_timestamping_constants(state);
9591 
9592 	/* update changed items */
9593 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9594 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9595 
9596 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9597 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9598 
9599 		drm_dbg_state(state->dev,
9600 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9601 			"planes_changed:%d, mode_changed:%d, active_changed:%d,"
9602 			"connectors_changed:%d\n",
9603 			acrtc->crtc_id,
9604 			new_crtc_state->enable,
9605 			new_crtc_state->active,
9606 			new_crtc_state->planes_changed,
9607 			new_crtc_state->mode_changed,
9608 			new_crtc_state->active_changed,
9609 			new_crtc_state->connectors_changed);
9610 
9611 		/* Disable cursor if disabling crtc */
9612 		if (old_crtc_state->active && !new_crtc_state->active) {
9613 			struct dc_cursor_position position;
9614 
9615 			memset(&position, 0, sizeof(position));
9616 			mutex_lock(&dm->dc_lock);
9617 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9618 			mutex_unlock(&dm->dc_lock);
9619 		}
9620 
9621 		/* Copy all transient state flags into dc state */
9622 		if (dm_new_crtc_state->stream) {
9623 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9624 							    dm_new_crtc_state->stream);
9625 		}
9626 
9627 		/* handles headless hotplug case, updating new_state and
9628 		 * aconnector as needed
9629 		 */
9630 
9631 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9632 
9633 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9634 
9635 			if (!dm_new_crtc_state->stream) {
9636 				/*
9637 				 * This can happen because of issues with
9638 				 * userspace notification delivery: userspace
9639 				 * tries to set a mode on a display that is in
9640 				 * fact disconnected, so dc_sink is NULL on the
9641 				 * aconnector. We expect a mode reset to follow
9642 				 * shortly.
9643 				 *
9644 				 * This can also happen when an unplug occurs
9645 				 * while the resume sequence is still running.
9646 				 *
9647 				 * In either case we want to pretend we still
9648 				 * have a sink, to keep the pipe running so that
9649 				 * the hw state stays consistent with the sw state.
9650 				 */
9651 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9652 						__func__, acrtc->base.base.id);
9653 				continue;
9654 			}
9655 
9656 			if (dm_old_crtc_state->stream)
9657 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9658 
9659 			pm_runtime_get_noresume(dev->dev);
9660 
9661 			acrtc->enabled = true;
9662 			acrtc->hw_mode = new_crtc_state->mode;
9663 			crtc->hwmode = new_crtc_state->mode;
9664 			mode_set_reset_required = true;
9665 		} else if (modereset_required(new_crtc_state)) {
9666 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9667 			/* i.e. reset mode */
9668 			if (dm_old_crtc_state->stream)
9669 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9670 
9671 			mode_set_reset_required = true;
9672 		}
9673 	} /* for_each_crtc_in_state() */
9674 
9675 	if (dc_state) {
9676 		/* if there is a mode set or reset, disable eDP PSR */
9677 		if (mode_set_reset_required) {
9678 			if (dm->vblank_control_workqueue)
9679 				flush_workqueue(dm->vblank_control_workqueue);
9680 
9681 			amdgpu_dm_psr_disable_all(dm);
9682 		}
9683 
9684 		dm_enable_per_frame_crtc_master_sync(dc_state);
9685 		mutex_lock(&dm->dc_lock);
9686 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9687 
9688 		/* Allow idle optimization when vblank count is 0 for display off */
9689 		if (dm->active_vblank_irq_count == 0)
9690 			dc_allow_idle_optimizations(dm->dc, true);
9691 		mutex_unlock(&dm->dc_lock);
9692 	}
9693 
9694 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9695 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9696 
9697 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9698 
9699 		if (dm_new_crtc_state->stream != NULL) {
9700 			const struct dc_stream_status *status =
9701 					dc_stream_get_status(dm_new_crtc_state->stream);
9702 
9703 			if (!status)
9704 				status = dc_stream_get_status_from_state(dc_state,
9705 									 dm_new_crtc_state->stream);
9706 			if (!status)
9707 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9708 			else
9709 				acrtc->otg_inst = status->primary_otg_inst;
9710 		}
9711 	}
9712 #ifdef CONFIG_DRM_AMD_DC_HDCP
9713 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9714 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9715 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9716 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9717 
9718 		new_crtc_state = NULL;
9719 
9720 		if (acrtc)
9721 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9722 
9723 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9724 
9725 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9726 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9727 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9728 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9729 			dm_new_con_state->update_hdcp = true;
9730 			continue;
9731 		}
9732 
9733 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9734 			hdcp_update_display(
9735 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9736 				new_con_state->hdcp_content_type,
9737 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9738 	}
9739 #endif
9740 
9741 	/* Handle connector state changes */
9742 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9743 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9744 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9745 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9746 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9747 		struct dc_stream_update stream_update;
9748 		struct dc_info_packet hdr_packet;
9749 		struct dc_stream_status *status = NULL;
9750 		bool abm_changed, hdr_changed, scaling_changed;
9751 
9752 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9753 		memset(&stream_update, 0, sizeof(stream_update));
9754 
9755 		if (acrtc) {
9756 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9757 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9758 		}
9759 
9760 		/* Skip any modesets/resets */
9761 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9762 			continue;
9763 
9764 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9765 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9766 
9767 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9768 							     dm_old_con_state);
9769 
9770 		abm_changed = dm_new_crtc_state->abm_level !=
9771 			      dm_old_crtc_state->abm_level;
9772 
9773 		hdr_changed =
9774 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9775 
9776 		if (!scaling_changed && !abm_changed && !hdr_changed)
9777 			continue;
9778 
9779 		stream_update.stream = dm_new_crtc_state->stream;
9780 		if (scaling_changed) {
9781 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9782 					dm_new_con_state, dm_new_crtc_state->stream);
9783 
9784 			stream_update.src = dm_new_crtc_state->stream->src;
9785 			stream_update.dst = dm_new_crtc_state->stream->dst;
9786 		}
9787 
9788 		if (abm_changed) {
9789 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9790 
9791 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9792 		}
9793 
9794 		if (hdr_changed) {
9795 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9796 			stream_update.hdr_static_metadata = &hdr_packet;
9797 		}
9798 
9799 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9800 
9801 		if (WARN_ON(!status))
9802 			continue;
9803 
9804 		WARN_ON(!status->plane_count);
9805 
9806 		/*
9807 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9808 		 * Here we create an empty update on each plane.
9809 		 * To fix this, DC should permit updating only stream properties.
9810 		 */
9811 		for (j = 0; j < status->plane_count; j++)
9812 			dummy_updates[j].surface = status->plane_states[0];
9813 
9814 
9815 		mutex_lock(&dm->dc_lock);
9816 		dc_commit_updates_for_stream(dm->dc,
9817 						     dummy_updates,
9818 						     status->plane_count,
9819 						     dm_new_crtc_state->stream,
9820 						     &stream_update,
9821 						     dc_state);
9822 		mutex_unlock(&dm->dc_lock);
9823 	}
9824 
9825 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9826 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9827 				      new_crtc_state, i) {
9828 		if (old_crtc_state->active && !new_crtc_state->active)
9829 			crtc_disable_count++;
9830 
9831 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9832 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9833 
9834 		/* For freesync config update on crtc state and params for irq */
9835 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9836 
9837 		/* Handle vrr on->off / off->on transitions */
9838 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9839 						dm_new_crtc_state);
9840 	}
9841 
9842 	/**
9843 	 * Enable interrupts for CRTCs that are newly enabled or went through
9844 	 * a modeset. It was intentionally deferred until after the front end
9845 	 * state was modified to wait until the OTG was on and so the IRQ
9846 	 * handlers didn't access stale or invalid state.
9847 	 */
9848 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9849 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9850 #ifdef CONFIG_DEBUG_FS
9851 		bool configure_crc = false;
9852 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9853 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9854 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9855 #endif
9856 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9857 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9858 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9859 #endif
9860 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9861 
9862 		if (new_crtc_state->active &&
9863 		    (!old_crtc_state->active ||
9864 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9865 			dc_stream_retain(dm_new_crtc_state->stream);
9866 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9867 			manage_dm_interrupts(adev, acrtc, true);
9868 
9869 #ifdef CONFIG_DEBUG_FS
9870 			/**
9871 			 * Frontend may have changed so reapply the CRC capture
9872 			 * settings for the stream.
9873 			 */
9874 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9875 
9876 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9877 				configure_crc = true;
9878 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9879 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9880 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9881 					acrtc->dm_irq_params.crc_window.update_win = true;
9882 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9883 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9884 					crc_rd_wrk->crtc = crtc;
9885 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9886 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9887 				}
9888 #endif
9889 			}
9890 
9891 			if (configure_crc)
9892 				if (amdgpu_dm_crtc_configure_crc_source(
9893 					crtc, dm_new_crtc_state, cur_crc_src))
9894 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9895 #endif
9896 		}
9897 	}
9898 
9899 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9900 		if (new_crtc_state->async_flip)
9901 			wait_for_vblank = false;
9902 
9903 	/* update planes when needed per crtc*/
9904 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9905 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9906 
9907 		if (dm_new_crtc_state->stream)
9908 			amdgpu_dm_commit_planes(state, dc_state, dev,
9909 						dm, crtc, wait_for_vblank);
9910 	}
9911 
9912 	/* Update audio instances for each connector. */
9913 	amdgpu_dm_commit_audio(dev, state);
9914 
9915 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9916 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9917 	/* restore the backlight level */
9918 	for (i = 0; i < dm->num_of_edps; i++) {
9919 		if (dm->backlight_dev[i] &&
9920 		    (dm->actual_brightness[i] != dm->brightness[i]))
9921 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9922 	}
9923 #endif
9924 	/*
9925 	 * Send a vblank event for any event not handled in the flip path, and
9926 	 * mark the events as consumed for drm_atomic_helper_commit_hw_done.
9927 	 */
9928 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9929 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9930 
9931 		if (new_crtc_state->event)
9932 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9933 
9934 		new_crtc_state->event = NULL;
9935 	}
9936 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9937 
9938 	/* Signal HW programming completion */
9939 	drm_atomic_helper_commit_hw_done(state);
9940 
9941 	if (wait_for_vblank)
9942 		drm_atomic_helper_wait_for_flip_done(dev, state);
9943 
9944 	drm_atomic_helper_cleanup_planes(dev, state);
9945 
9946 	/* return the stolen vga memory back to VRAM */
9947 	if (!adev->mman.keep_stolen_vga_memory)
9948 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9949 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9950 
9951 	/*
9952 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9953 	 * so we can put the GPU into runtime suspend if we're not driving any
9954 	 * displays anymore
9955 	 */
9956 	for (i = 0; i < crtc_disable_count; i++)
9957 		pm_runtime_put_autosuspend(dev->dev);
9958 	pm_runtime_mark_last_busy(dev->dev);
9959 
9960 	if (dc_state_temp)
9961 		dc_release_state(dc_state_temp);
9962 }
9963 
9964 
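/*
 * Build and commit a minimal atomic state (the connector, its CRTC and the
 * CRTC's primary plane) with mode_changed forced, so that the previous
 * display configuration is reprogrammed.
 */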
9965 static int dm_force_atomic_commit(struct drm_connector *connector)
9966 {
9967 	int ret = 0;
9968 	struct drm_device *ddev = connector->dev;
9969 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9970 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9971 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9972 	struct drm_connector_state *conn_state;
9973 	struct drm_crtc_state *crtc_state;
9974 	struct drm_plane_state *plane_state;
9975 
9976 	if (!state)
9977 		return -ENOMEM;
9978 
9979 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9980 
9981 	/* Construct an atomic state to restore previous display setting */
9982 
9983 	/*
9984 	 * Attach connectors to drm_atomic_state
9985 	 */
9986 	conn_state = drm_atomic_get_connector_state(state, connector);
9987 
9988 	ret = PTR_ERR_OR_ZERO(conn_state);
9989 	if (ret)
9990 		goto out;
9991 
9992 	/* Attach crtc to drm_atomic_state*/
9993 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9994 
9995 	ret = PTR_ERR_OR_ZERO(crtc_state);
9996 	if (ret)
9997 		goto out;
9998 
9999 	/* force a restore */
10000 	crtc_state->mode_changed = true;
10001 
10002 	/* Attach plane to drm_atomic_state */
10003 	plane_state = drm_atomic_get_plane_state(state, plane);
10004 
10005 	ret = PTR_ERR_OR_ZERO(plane_state);
10006 	if (ret)
10007 		goto out;
10008 
10009 	/* Call commit internally with the state we just constructed */
10010 	ret = drm_atomic_commit(state);
10011 
10012 out:
10013 	drm_atomic_state_put(state);
10014 	if (ret)
10015 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10016 
10017 	return ret;
10018 }
10019 
10020 /*
10021  * This function handles all cases when set mode does not come upon hotplug.
10022  * This includes when a display is unplugged then plugged back into the
10023  * same port and when running without usermode desktop manager support.
10024  */
10025 void dm_restore_drm_connector_state(struct drm_device *dev,
10026 				    struct drm_connector *connector)
10027 {
10028 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10029 	struct amdgpu_crtc *disconnected_acrtc;
10030 	struct dm_crtc_state *acrtc_state;
10031 
10032 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10033 		return;
10034 
10035 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10036 	if (!disconnected_acrtc)
10037 		return;
10038 
10039 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10040 	if (!acrtc_state->stream)
10041 		return;
10042 
10043 	/*
10044 	 * If the previous sink has not been released and differs from the
10045 	 * current one, we deduce that we cannot rely on a usermode call to
10046 	 * turn the display back on, so we do it here.
10047 	 */
10048 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10049 		dm_force_atomic_commit(&aconnector->base);
10050 }
10051 
10052 /*
10053  * Grabs all modesetting locks to serialize against any blocking commits and
10054  * waits for completion of all non-blocking commits.
10055  */
10056 static int do_aquire_global_lock(struct drm_device *dev,
10057 				 struct drm_atomic_state *state)
10058 {
10059 	struct drm_crtc *crtc;
10060 	struct drm_crtc_commit *commit;
10061 	long ret;
10062 
10063 	/*
10064 	 * Adding all modeset locks to acquire_ctx ensures that when the
10065 	 * framework releases it, the extra locks we are taking here are
10066 	 * released as well.
10067 	 */
10068 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10069 	if (ret)
10070 		return ret;
10071 
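	/*
	 * Take a reference on the commit at the head of each CRTC's commit
	 * list under commit_lock, then wait (with 10 second timeouts) for
	 * its hardware programming and page flip to complete.
	 */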
10072 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10073 		spin_lock(&crtc->commit_lock);
10074 		commit = list_first_entry_or_null(&crtc->commit_list,
10075 				struct drm_crtc_commit, commit_entry);
10076 		if (commit)
10077 			drm_crtc_commit_get(commit);
10078 		spin_unlock(&crtc->commit_lock);
10079 
10080 		if (!commit)
10081 			continue;
10082 
10083 		/*
10084 		 * Make sure all pending HW programming completed and
10085 		 * page flips done
10086 		 */
10087 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10088 
10089 		if (ret > 0)
10090 			ret = wait_for_completion_interruptible_timeout(
10091 					&commit->flip_done, 10*HZ);
10092 
10093 		if (ret == 0)
10094 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10095 				  crtc->base.id, crtc->name);
10096 
10097 		drm_crtc_commit_put(commit);
10098 	}
10099 
10100 	return ret < 0 ? ret : 0;
10101 }
10102 
10103 static void get_freesync_config_for_crtc(
10104 	struct dm_crtc_state *new_crtc_state,
10105 	struct dm_connector_state *new_con_state)
10106 {
10107 	struct mod_freesync_config config = {0};
10108 	struct amdgpu_dm_connector *aconnector =
10109 			to_amdgpu_dm_connector(new_con_state->base.connector);
10110 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10111 	int vrefresh = drm_mode_vrefresh(mode);
10112 	bool fs_vid_mode = false;
10113 
10114 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10115 					vrefresh >= aconnector->min_vfreq &&
10116 					vrefresh <= aconnector->max_vfreq;
10117 
10118 	if (new_crtc_state->vrr_supported) {
10119 		new_crtc_state->stream->ignore_msa_timing_param = true;
10120 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10121 
10122 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10123 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10124 		config.vsif_supported = true;
10125 		config.btr = true;
10126 
10127 		if (fs_vid_mode) {
10128 			config.state = VRR_STATE_ACTIVE_FIXED;
10129 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10130 			goto out;
10131 		} else if (new_crtc_state->base.vrr_enabled) {
10132 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10133 		} else {
10134 			config.state = VRR_STATE_INACTIVE;
10135 		}
10136 	}
10137 out:
10138 	new_crtc_state->freesync_config = config;
10139 }
10140 
10141 static void reset_freesync_config_for_crtc(
10142 	struct dm_crtc_state *new_crtc_state)
10143 {
10144 	new_crtc_state->vrr_supported = false;
10145 
10146 	memset(&new_crtc_state->vrr_infopacket, 0,
10147 	       sizeof(new_crtc_state->vrr_infopacket));
10148 }
10149 
10150 static bool
10151 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10152 				 struct drm_crtc_state *new_crtc_state)
10153 {
10154 	const struct drm_display_mode *old_mode, *new_mode;
10155 
10156 	if (!old_crtc_state || !new_crtc_state)
10157 		return false;
10158 
10159 	old_mode = &old_crtc_state->mode;
10160 	new_mode = &new_crtc_state->mode;
10161 
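	/*
	 * The timing counts as "unchanged" for freesync when every horizontal
	 * parameter and the vsync pulse width match while vtotal, vsync_start
	 * and vsync_end all differ, i.e. the pattern produced when only the
	 * vertical front porch is stretched.
	 */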
10162 	if (old_mode->clock       == new_mode->clock &&
10163 	    old_mode->hdisplay    == new_mode->hdisplay &&
10164 	    old_mode->vdisplay    == new_mode->vdisplay &&
10165 	    old_mode->htotal      == new_mode->htotal &&
10166 	    old_mode->vtotal      != new_mode->vtotal &&
10167 	    old_mode->hsync_start == new_mode->hsync_start &&
10168 	    old_mode->vsync_start != new_mode->vsync_start &&
10169 	    old_mode->hsync_end   == new_mode->hsync_end &&
10170 	    old_mode->vsync_end   != new_mode->vsync_end &&
10171 	    old_mode->hskew       == new_mode->hskew &&
10172 	    old_mode->vscan       == new_mode->vscan &&
10173 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10174 	    (new_mode->vsync_end - new_mode->vsync_start))
10175 		return true;
10176 
10177 	return false;
10178 }
10179 
10180 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10181 	uint64_t num, den, res;
10182 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10183 
10184 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10185 
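	/*
	 * Refresh rate in uHz = pixel clock (kHz * 1000 to get Hz, then
	 * * 1000000 for uHz) divided by the pixels per frame. For example,
	 * a 148500 kHz clock with 2200 x 1125 total timing gives
	 * 60000000 uHz (60 Hz).
	 */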
10186 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10187 	den = (unsigned long long)new_crtc_state->mode.htotal *
10188 	      (unsigned long long)new_crtc_state->mode.vtotal;
10189 
10190 	res = div_u64(num, den);
10191 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10192 }
10193 
10194 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10195 			 struct drm_atomic_state *state,
10196 			 struct drm_crtc *crtc,
10197 			 struct drm_crtc_state *old_crtc_state,
10198 			 struct drm_crtc_state *new_crtc_state,
10199 			 bool enable,
10200 			 bool *lock_and_validation_needed)
10201 {
10202 	struct dm_atomic_state *dm_state = NULL;
10203 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10204 	struct dc_stream_state *new_stream;
10205 	int ret = 0;
10206 
10207 	/*
10208 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10209 	 * update changed items
10210 	 */
10211 	struct amdgpu_crtc *acrtc = NULL;
10212 	struct amdgpu_dm_connector *aconnector = NULL;
10213 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10214 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10215 
10216 	new_stream = NULL;
10217 
10218 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10219 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10220 	acrtc = to_amdgpu_crtc(crtc);
10221 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10222 
10223 	/* TODO This hack should go away */
10224 	if (aconnector && enable) {
10225 		/* Make sure fake sink is created in plug-in scenario */
10226 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10227 							    &aconnector->base);
10228 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10229 							    &aconnector->base);
10230 
10231 		if (IS_ERR(drm_new_conn_state)) {
10232 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10233 			goto fail;
10234 		}
10235 
10236 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10237 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10238 
10239 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10240 			goto skip_modeset;
10241 
10242 		new_stream = create_validate_stream_for_sink(aconnector,
10243 							     &new_crtc_state->mode,
10244 							     dm_new_conn_state,
10245 							     dm_old_crtc_state->stream);
10246 
10247 		/*
10248 		 * We can have no stream on ACTION_SET if a display
10249 		 * was disconnected during S3. In this case it is not an
10250 		 * error: the OS will be updated after detection and
10251 		 * will do the right thing on the next atomic commit.
10252 		 */
10253 
10254 		if (!new_stream) {
10255 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10256 					__func__, acrtc->base.base.id);
10257 			ret = -ENOMEM;
10258 			goto fail;
10259 		}
10260 
10261 		/*
10262 		 * TODO: Check VSDB bits to decide whether this should
10263 		 * be enabled or not.
10264 		 */
10265 		new_stream->triggered_crtc_reset.enabled =
10266 			dm->force_timing_sync;
10267 
10268 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10269 
10270 		ret = fill_hdr_info_packet(drm_new_conn_state,
10271 					   &new_stream->hdr_static_metadata);
10272 		if (ret)
10273 			goto fail;
10274 
10275 		/*
10276 		 * If we already removed the old stream from the context
10277 		 * (and set the new stream to NULL) then we can't reuse
10278 		 * the old stream even if the stream and scaling are unchanged.
10279 		 * We'll hit the BUG_ON and black screen.
10280 		 *
10281 		 * TODO: Refactor this function to allow this check to work
10282 		 * in all conditions.
10283 		 */
10284 		if (dm_new_crtc_state->stream &&
10285 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10286 			goto skip_modeset;
10287 
10288 		if (dm_new_crtc_state->stream &&
10289 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10290 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10291 			new_crtc_state->mode_changed = false;
10292 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10293 					 new_crtc_state->mode_changed);
10294 		}
10295 	}
10296 
10297 	/* mode_changed flag may get updated above, need to check again */
10298 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10299 		goto skip_modeset;
10300 
10301 	drm_dbg_state(state->dev,
10302 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10303 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
10304 		"connectors_changed:%d\n",
10305 		acrtc->crtc_id,
10306 		new_crtc_state->enable,
10307 		new_crtc_state->active,
10308 		new_crtc_state->planes_changed,
10309 		new_crtc_state->mode_changed,
10310 		new_crtc_state->active_changed,
10311 		new_crtc_state->connectors_changed);
10312 
10313 	/* Remove stream for any changed/disabled CRTC */
10314 	if (!enable) {
10315 
10316 		if (!dm_old_crtc_state->stream)
10317 			goto skip_modeset;
10318 
10319 		if (dm_new_crtc_state->stream &&
10320 		    is_timing_unchanged_for_freesync(new_crtc_state,
10321 						     old_crtc_state)) {
10322 			new_crtc_state->mode_changed = false;
10323 			DRM_DEBUG_DRIVER(
10324 				"Mode change not required for front porch change, "
10325 				"setting mode_changed to %d",
10326 				new_crtc_state->mode_changed);
10327 
10328 			set_freesync_fixed_config(dm_new_crtc_state);
10329 
10330 			goto skip_modeset;
10331 		} else if (aconnector &&
10332 			   is_freesync_video_mode(&new_crtc_state->mode,
10333 						  aconnector)) {
10334 			struct drm_display_mode *high_mode;
10335 
10336 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10337 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10338 				set_freesync_fixed_config(dm_new_crtc_state);
10339 			}
10340 		}
10341 
10342 		ret = dm_atomic_get_state(state, &dm_state);
10343 		if (ret)
10344 			goto fail;
10345 
10346 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10347 				crtc->base.id);
10348 
10349 		/* i.e. reset mode */
10350 		if (dc_remove_stream_from_ctx(
10351 				dm->dc,
10352 				dm_state->context,
10353 				dm_old_crtc_state->stream) != DC_OK) {
10354 			ret = -EINVAL;
10355 			goto fail;
10356 		}
10357 
10358 		dc_stream_release(dm_old_crtc_state->stream);
10359 		dm_new_crtc_state->stream = NULL;
10360 
10361 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10362 
10363 		*lock_and_validation_needed = true;
10364 
10365 	} else {/* Add stream for any updated/enabled CRTC */
10366 		/*
10367 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
10368 		 * newly added MST connectors are not found in the existing crtc_state
10369 		 * (chained mode). TODO: need to dig out the root cause of that.
10370 		 */
10371 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10372 			goto skip_modeset;
10373 
10374 		if (modereset_required(new_crtc_state))
10375 			goto skip_modeset;
10376 
10377 		if (modeset_required(new_crtc_state, new_stream,
10378 				     dm_old_crtc_state->stream)) {
10379 
10380 			WARN_ON(dm_new_crtc_state->stream);
10381 
10382 			ret = dm_atomic_get_state(state, &dm_state);
10383 			if (ret)
10384 				goto fail;
10385 
10386 			dm_new_crtc_state->stream = new_stream;
10387 
10388 			dc_stream_retain(new_stream);
10389 
10390 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10391 					 crtc->base.id);
10392 
10393 			if (dc_add_stream_to_ctx(
10394 					dm->dc,
10395 					dm_state->context,
10396 					dm_new_crtc_state->stream) != DC_OK) {
10397 				ret = -EINVAL;
10398 				goto fail;
10399 			}
10400 
10401 			*lock_and_validation_needed = true;
10402 		}
10403 	}
10404 
10405 skip_modeset:
10406 	/* Release extra reference */
10407 	if (new_stream)
10408 		 dc_stream_release(new_stream);
10409 
10410 	/*
10411 	 * We want to do dc stream updates that do not require a
10412 	 * full modeset below.
10413 	 */
10414 	if (!(enable && aconnector && new_crtc_state->active))
10415 		return 0;
10416 	/*
10417 	 * Given above conditions, the dc state cannot be NULL because:
10418 	 * 1. We're in the process of enabling CRTCs (the stream has just been
10419 	 *    added to the dc context, or is already on the context),
10420 	 * 2. Has a valid connector attached, and
10421 	 * 3. Is currently active and enabled.
10422 	 * => The dc stream state currently exists.
10423 	 */
10424 	BUG_ON(dm_new_crtc_state->stream == NULL);
10425 
10426 	/* Scaling or underscan settings */
10427 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10428 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10429 		update_stream_scaling_settings(
10430 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10431 
10432 	/* ABM settings */
10433 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10434 
10435 	/*
10436 	 * Color management settings. We also update color properties
10437 	 * when a modeset is needed, to ensure it gets reprogrammed.
10438 	 */
10439 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10440 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10441 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10442 		if (ret)
10443 			goto fail;
10444 	}
10445 
10446 	/* Update Freesync settings. */
10447 	get_freesync_config_for_crtc(dm_new_crtc_state,
10448 				     dm_new_conn_state);
10449 
10450 	return ret;
10451 
10452 fail:
10453 	if (new_stream)
10454 		dc_stream_release(new_stream);
10455 	return ret;
10456 }
10457 
10458 static bool should_reset_plane(struct drm_atomic_state *state,
10459 			       struct drm_plane *plane,
10460 			       struct drm_plane_state *old_plane_state,
10461 			       struct drm_plane_state *new_plane_state)
10462 {
10463 	struct drm_plane *other;
10464 	struct drm_plane_state *old_other_state, *new_other_state;
10465 	struct drm_crtc_state *new_crtc_state;
10466 	int i;
10467 
10468 	/*
10469 	 * TODO: Remove this hack once the checks below are sufficient to
10470 	 * determine when we need to reset all the planes on
10471 	 * the stream.
10472 	 */
10473 	if (state->allow_modeset)
10474 		return true;
10475 
10476 	/* Exit early if we know that we're adding or removing the plane. */
10477 	if (old_plane_state->crtc != new_plane_state->crtc)
10478 		return true;
10479 
10480 	/* old crtc == new_crtc == NULL, plane not in context. */
10481 	if (!new_plane_state->crtc)
10482 		return false;
10483 
10484 	new_crtc_state =
10485 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10486 
10487 	if (!new_crtc_state)
10488 		return true;
10489 
10490 	/* CRTC Degamma changes currently require us to recreate planes. */
10491 	if (new_crtc_state->color_mgmt_changed)
10492 		return true;
10493 
10494 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10495 		return true;
10496 
10497 	/*
10498 	 * If there are any new primary or overlay planes being added or
10499 	 * removed then the z-order can potentially change. To ensure
10500 	 * correct z-order and pipe acquisition the current DC architecture
10501 	 * requires us to remove and recreate all existing planes.
10502 	 *
10503 	 * TODO: Come up with a more elegant solution for this.
10504 	 */
10505 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10506 		struct amdgpu_framebuffer *old_afb, *new_afb;
10507 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10508 			continue;
10509 
10510 		if (old_other_state->crtc != new_plane_state->crtc &&
10511 		    new_other_state->crtc != new_plane_state->crtc)
10512 			continue;
10513 
10514 		if (old_other_state->crtc != new_other_state->crtc)
10515 			return true;
10516 
10517 		/* Src/dst size and scaling updates. */
10518 		if (old_other_state->src_w != new_other_state->src_w ||
10519 		    old_other_state->src_h != new_other_state->src_h ||
10520 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10521 		    old_other_state->crtc_h != new_other_state->crtc_h)
10522 			return true;
10523 
10524 		/* Rotation / mirroring updates. */
10525 		if (old_other_state->rotation != new_other_state->rotation)
10526 			return true;
10527 
10528 		/* Blending updates. */
10529 		if (old_other_state->pixel_blend_mode !=
10530 		    new_other_state->pixel_blend_mode)
10531 			return true;
10532 
10533 		/* Alpha updates. */
10534 		if (old_other_state->alpha != new_other_state->alpha)
10535 			return true;
10536 
10537 		/* Colorspace changes. */
10538 		if (old_other_state->color_range != new_other_state->color_range ||
10539 		    old_other_state->color_encoding != new_other_state->color_encoding)
10540 			return true;
10541 
10542 		/* Framebuffer checks fall at the end. */
10543 		if (!old_other_state->fb || !new_other_state->fb)
10544 			continue;
10545 
10546 		/* Pixel format changes can require bandwidth updates. */
10547 		if (old_other_state->fb->format != new_other_state->fb->format)
10548 			return true;
10549 
10550 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10551 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10552 
10553 		/* Tiling and DCC changes also require bandwidth updates. */
10554 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10555 		    old_afb->base.modifier != new_afb->base.modifier)
10556 			return true;
10557 	}
10558 
10559 	return false;
10560 }
10561 
10562 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10563 			      struct drm_plane_state *new_plane_state,
10564 			      struct drm_framebuffer *fb)
10565 {
10566 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10567 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10568 	unsigned int pitch;
10569 	bool linear;
10570 
10571 	if (fb->width > new_acrtc->max_cursor_width ||
10572 	    fb->height > new_acrtc->max_cursor_height) {
10573 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10574 				 new_plane_state->fb->width,
10575 				 new_plane_state->fb->height);
10576 		return -EINVAL;
10577 	}
10578 	if (new_plane_state->src_w != fb->width << 16 ||
10579 	    new_plane_state->src_h != fb->height << 16) {
10580 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10581 		return -EINVAL;
10582 	}
10583 
10584 	/* Pitch in pixels */
10585 	pitch = fb->pitches[0] / fb->format->cpp[0];
10586 
10587 	if (fb->width != pitch) {
10588 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10589 				 fb->width, pitch);
10590 		return -EINVAL;
10591 	}
10592 
10593 	switch (pitch) {
10594 	case 64:
10595 	case 128:
10596 	case 256:
10597 		/* FB pitch is supported by cursor plane */
10598 		break;
10599 	default:
10600 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10601 		return -EINVAL;
10602 	}
10603 
10604 	/* Core DRM takes care of checking FB modifiers, so we only need to
10605 	 * check tiling flags when the FB doesn't have a modifier. */
10606 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10607 		if (adev->family < AMDGPU_FAMILY_AI) {
10608 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10609 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10610 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10611 		} else {
10612 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10613 		}
10614 		if (!linear) {
10615 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10616 			return -EINVAL;
10617 		}
10618 	}
10619 
10620 	return 0;
10621 }
10622 
10623 static int dm_update_plane_state(struct dc *dc,
10624 				 struct drm_atomic_state *state,
10625 				 struct drm_plane *plane,
10626 				 struct drm_plane_state *old_plane_state,
10627 				 struct drm_plane_state *new_plane_state,
10628 				 bool enable,
10629 				 bool *lock_and_validation_needed)
10630 {
10631 
10632 	struct dm_atomic_state *dm_state = NULL;
10633 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10634 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10635 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10636 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10637 	struct amdgpu_crtc *new_acrtc;
10638 	bool needs_reset;
10639 	int ret = 0;
10640 
10641 
10642 	new_plane_crtc = new_plane_state->crtc;
10643 	old_plane_crtc = old_plane_state->crtc;
10644 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10645 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10646 
10647 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10648 		if (!enable || !new_plane_crtc ||
10649 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10650 			return 0;
10651 
10652 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10653 
10654 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10655 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10656 			return -EINVAL;
10657 		}
10658 
10659 		if (new_plane_state->fb) {
10660 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10661 						 new_plane_state->fb);
10662 			if (ret)
10663 				return ret;
10664 		}
10665 
10666 		return 0;
10667 	}
10668 
10669 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10670 					 new_plane_state);
10671 
10672 	/* Remove any changed/removed planes */
10673 	if (!enable) {
10674 		if (!needs_reset)
10675 			return 0;
10676 
10677 		if (!old_plane_crtc)
10678 			return 0;
10679 
10680 		old_crtc_state = drm_atomic_get_old_crtc_state(
10681 				state, old_plane_crtc);
10682 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10683 
10684 		if (!dm_old_crtc_state->stream)
10685 			return 0;
10686 
10687 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10688 				plane->base.id, old_plane_crtc->base.id);
10689 
10690 		ret = dm_atomic_get_state(state, &dm_state);
10691 		if (ret)
10692 			return ret;
10693 
10694 		if (!dc_remove_plane_from_context(
10695 				dc,
10696 				dm_old_crtc_state->stream,
10697 				dm_old_plane_state->dc_state,
10698 				dm_state->context)) {
10699 
10700 			return -EINVAL;
10701 		}
10702 
10703 
10704 		dc_plane_state_release(dm_old_plane_state->dc_state);
10705 		dm_new_plane_state->dc_state = NULL;
10706 
10707 		*lock_and_validation_needed = true;
10708 
10709 	} else { /* Add new planes */
10710 		struct dc_plane_state *dc_new_plane_state;
10711 
10712 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10713 			return 0;
10714 
10715 		if (!new_plane_crtc)
10716 			return 0;
10717 
10718 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10719 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10720 
10721 		if (!dm_new_crtc_state->stream)
10722 			return 0;
10723 
10724 		if (!needs_reset)
10725 			return 0;
10726 
10727 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10728 		if (ret)
10729 			return ret;
10730 
10731 		WARN_ON(dm_new_plane_state->dc_state);
10732 
10733 		dc_new_plane_state = dc_create_plane_state(dc);
10734 		if (!dc_new_plane_state)
10735 			return -ENOMEM;
10736 
10737 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10738 				 plane->base.id, new_plane_crtc->base.id);
10739 
10740 		ret = fill_dc_plane_attributes(
10741 			drm_to_adev(new_plane_crtc->dev),
10742 			dc_new_plane_state,
10743 			new_plane_state,
10744 			new_crtc_state);
10745 		if (ret) {
10746 			dc_plane_state_release(dc_new_plane_state);
10747 			return ret;
10748 		}
10749 
10750 		ret = dm_atomic_get_state(state, &dm_state);
10751 		if (ret) {
10752 			dc_plane_state_release(dc_new_plane_state);
10753 			return ret;
10754 		}
10755 
10756 		/*
10757 		 * Any atomic check errors that occur after this will
10758 		 * not need a release. The plane state will be attached
10759 		 * to the stream, and therefore part of the atomic
10760 		 * state. It'll be released when the atomic state is
10761 		 * cleaned.
10762 		 */
10763 		if (!dc_add_plane_to_context(
10764 				dc,
10765 				dm_new_crtc_state->stream,
10766 				dc_new_plane_state,
10767 				dm_state->context)) {
10768 
10769 			dc_plane_state_release(dc_new_plane_state);
10770 			return -EINVAL;
10771 		}
10772 
10773 		dm_new_plane_state->dc_state = dc_new_plane_state;
10774 
10775 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10776 
10777 		/* Tell DC to do a full surface update every time there
10778 		 * is a plane change. Inefficient, but works for now.
10779 		 */
10780 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10781 
10782 		*lock_and_validation_needed = true;
10783 	}
10784 
10785 
10786 	return ret;
10787 }
10788 
10789 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10790 				       int *src_w, int *src_h)
10791 {
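	/*
	 * src_w/src_h are 16.16 fixed point, hence the >> 16. For 90/270
	 * degree rotation the width and height are swapped so callers see
	 * the post-rotation source size.
	 */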
10792 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10793 	case DRM_MODE_ROTATE_90:
10794 	case DRM_MODE_ROTATE_270:
10795 		*src_w = plane_state->src_h >> 16;
10796 		*src_h = plane_state->src_w >> 16;
10797 		break;
10798 	case DRM_MODE_ROTATE_0:
10799 	case DRM_MODE_ROTATE_180:
10800 	default:
10801 		*src_w = plane_state->src_w >> 16;
10802 		*src_h = plane_state->src_h >> 16;
10803 		break;
10804 	}
10805 }
10806 
10807 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10808 				struct drm_crtc *crtc,
10809 				struct drm_crtc_state *new_crtc_state)
10810 {
10811 	struct drm_plane *cursor = crtc->cursor, *underlying;
10812 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10813 	int i;
10814 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10815 	int cursor_src_w, cursor_src_h;
10816 	int underlying_src_w, underlying_src_h;
10817 
10818 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10819 	 * cursor per pipe but it's going to inherit the scaling and
10820 	 * positioning from the underlying pipe. Check the cursor plane's
10821 	 * blending properties match the underlying planes'. */
10822 
10823 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10824 	if (!new_cursor_state || !new_cursor_state->fb) {
10825 		return 0;
10826 	}
10827 
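	/*
	 * Scale factors are expressed in 1/1000 units (CRTC size over source
	 * size) so cursor and underlying plane scaling can be compared with
	 * integer math.
	 */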
10828 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10829 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10830 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10831 
10832 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10833 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10834 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10835 			continue;
10836 
10837 		/* Ignore disabled planes */
10838 		if (!new_underlying_state->fb)
10839 			continue;
10840 
10841 		dm_get_oriented_plane_size(new_underlying_state,
10842 					   &underlying_src_w, &underlying_src_h);
10843 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10844 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10845 
10846 		if (cursor_scale_w != underlying_scale_w ||
10847 		    cursor_scale_h != underlying_scale_h) {
10848 			drm_dbg_atomic(crtc->dev,
10849 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10850 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10851 			return -EINVAL;
10852 		}
10853 
10854 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10855 		if (new_underlying_state->crtc_x <= 0 &&
10856 		    new_underlying_state->crtc_y <= 0 &&
10857 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10858 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10859 			break;
10860 	}
10861 
10862 	return 0;
10863 }
10864 
10865 #if defined(CONFIG_DRM_AMD_DC_DCN)
10866 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10867 {
10868 	struct drm_connector *connector;
10869 	struct drm_connector_state *conn_state, *old_conn_state;
10870 	struct amdgpu_dm_connector *aconnector = NULL;
10871 	int i;
10872 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10873 		if (!conn_state->crtc)
10874 			conn_state = old_conn_state;
10875 
10876 		if (conn_state->crtc != crtc)
10877 			continue;
10878 
10879 		aconnector = to_amdgpu_dm_connector(connector);
10880 		if (!aconnector->port || !aconnector->mst_port)
10881 			aconnector = NULL;
10882 		else
10883 			break;
10884 	}
10885 
10886 	if (!aconnector)
10887 		return 0;
10888 
10889 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10890 }
10891 #endif
10892 
10893 /**
10894  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10895  * @dev: The DRM device
10896  * @state: The atomic state to commit
10897  *
10898  * Validate that the given atomic state is programmable by DC into hardware.
10899  * This involves constructing a &struct dc_state reflecting the new hardware
10900  * state we wish to commit, then querying DC to see if it is programmable. It's
10901  * important not to modify the existing DC state. Otherwise, atomic_check
10902  * may unexpectedly commit hardware changes.
10903  *
10904  * When validating the DC state, it's important that the right locks are
10905  * acquired. For full updates case which removes/adds/updates streams on one
10906  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10907  * that any such full update commit will wait for completion of any outstanding
10908  * flip using DRMs synchronization events.
10909  *
10910  * Note that DM adds the affected connectors for all CRTCs in state, when that
10911  * might not seem necessary. This is because DC stream creation requires the
10912  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10913  * be possible but non-trivial - a possible TODO item.
10914  *
10915  * Return: 0 on success, or a negative error code if validation failed.
10916  */
10917 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10918 				  struct drm_atomic_state *state)
10919 {
10920 	struct amdgpu_device *adev = drm_to_adev(dev);
10921 	struct dm_atomic_state *dm_state = NULL;
10922 	struct dc *dc = adev->dm.dc;
10923 	struct drm_connector *connector;
10924 	struct drm_connector_state *old_con_state, *new_con_state;
10925 	struct drm_crtc *crtc;
10926 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10927 	struct drm_plane *plane;
10928 	struct drm_plane_state *old_plane_state, *new_plane_state;
10929 	enum dc_status status;
10930 	int ret, i;
10931 	bool lock_and_validation_needed = false;
10932 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10933 #if defined(CONFIG_DRM_AMD_DC_DCN)
10934 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10935 	struct drm_dp_mst_topology_state *mst_state;
10936 	struct drm_dp_mst_topology_mgr *mgr;
10937 #endif
10938 
10939 	trace_amdgpu_dm_atomic_check_begin(state);
10940 
10941 	ret = drm_atomic_helper_check_modeset(dev, state);
10942 	if (ret) {
10943 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10944 		goto fail;
10945 	}
10946 
10947 	/* Check connector changes */
10948 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10949 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10950 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10951 
10952 		/* Skip connectors that are disabled or part of modeset already. */
10953 		if (!old_con_state->crtc && !new_con_state->crtc)
10954 			continue;
10955 
10956 		if (!new_con_state->crtc)
10957 			continue;
10958 
10959 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10960 		if (IS_ERR(new_crtc_state)) {
10961 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10962 			ret = PTR_ERR(new_crtc_state);
10963 			goto fail;
10964 		}
10965 
10966 		if (dm_old_con_state->abm_level !=
10967 		    dm_new_con_state->abm_level)
10968 			new_crtc_state->connectors_changed = true;
10969 	}
10970 
10971 #if defined(CONFIG_DRM_AMD_DC_DCN)
10972 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10973 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10974 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10975 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10976 				if (ret) {
10977 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10978 					goto fail;
10979 				}
10980 			}
10981 		}
10982 		pre_validate_dsc(state, &dm_state, vars);
10983 	}
10984 #endif
10985 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10986 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10987 
10988 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10989 		    !new_crtc_state->color_mgmt_changed &&
10990 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10991 			dm_old_crtc_state->dsc_force_changed == false)
10992 			continue;
10993 
10994 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10995 		if (ret) {
10996 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10997 			goto fail;
10998 		}
10999 
11000 		if (!new_crtc_state->enable)
11001 			continue;
11002 
11003 		ret = drm_atomic_add_affected_connectors(state, crtc);
11004 		if (ret) {
11005 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11006 			goto fail;
11007 		}
11008 
11009 		ret = drm_atomic_add_affected_planes(state, crtc);
11010 		if (ret) {
11011 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11012 			goto fail;
11013 		}
11014 
11015 		if (dm_old_crtc_state->dsc_force_changed)
11016 			new_crtc_state->mode_changed = true;
11017 	}
11018 
11019 	/*
11020 	 * Add all primary and overlay planes on the CRTC to the state
11021 	 * whenever a plane is enabled to maintain correct z-ordering
11022 	 * and to enable fast surface updates.
11023 	 */
11024 	drm_for_each_crtc(crtc, dev) {
11025 		bool modified = false;
11026 
11027 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11028 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11029 				continue;
11030 
11031 			if (new_plane_state->crtc == crtc ||
11032 			    old_plane_state->crtc == crtc) {
11033 				modified = true;
11034 				break;
11035 			}
11036 		}
11037 
11038 		if (!modified)
11039 			continue;
11040 
11041 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11042 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11043 				continue;
11044 
11045 			new_plane_state =
11046 				drm_atomic_get_plane_state(state, plane);
11047 
11048 			if (IS_ERR(new_plane_state)) {
11049 				ret = PTR_ERR(new_plane_state);
11050 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11051 				goto fail;
11052 			}
11053 		}
11054 	}
11055 
11056 	/* Remove existing planes if they are modified */
11057 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11058 		ret = dm_update_plane_state(dc, state, plane,
11059 					    old_plane_state,
11060 					    new_plane_state,
11061 					    false,
11062 					    &lock_and_validation_needed);
11063 		if (ret) {
11064 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11065 			goto fail;
11066 		}
11067 	}
11068 
11069 	/* Disable all crtcs which require disable */
11070 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11071 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11072 					   old_crtc_state,
11073 					   new_crtc_state,
11074 					   false,
11075 					   &lock_and_validation_needed);
11076 		if (ret) {
11077 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11078 			goto fail;
11079 		}
11080 	}
11081 
11082 	/* Enable all crtcs which require enable */
11083 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11084 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11085 					   old_crtc_state,
11086 					   new_crtc_state,
11087 					   true,
11088 					   &lock_and_validation_needed);
11089 		if (ret) {
11090 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11091 			goto fail;
11092 		}
11093 	}
11094 
11095 	/* Add new/modified planes */
11096 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11097 		ret = dm_update_plane_state(dc, state, plane,
11098 					    old_plane_state,
11099 					    new_plane_state,
11100 					    true,
11101 					    &lock_and_validation_needed);
11102 		if (ret) {
11103 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11104 			goto fail;
11105 		}
11106 	}
11107 
11108 	/* Run this here since we want to validate the streams we created */
11109 	ret = drm_atomic_helper_check_planes(dev, state);
11110 	if (ret) {
11111 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11112 		goto fail;
11113 	}
11114 
11115 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11116 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11117 		if (dm_new_crtc_state->mpo_requested)
11118 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11119 	}
11120 
11121 	/* Check cursor planes scaling */
11122 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11123 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11124 		if (ret) {
11125 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11126 			goto fail;
11127 		}
11128 	}
11129 
11130 	if (state->legacy_cursor_update) {
11131 		/*
11132 		 * This is a fast cursor update coming from the plane update
11133 		 * helper, check if it can be done asynchronously for better
11134 		 * performance.
11135 		 */
11136 		state->async_update =
11137 			!drm_atomic_helper_async_check(dev, state);
11138 
11139 		/*
11140 		 * Skip the remaining global validation if this is an async
11141 		 * update. Cursor updates can be done without affecting
11142 		 * state or bandwidth calcs and this avoids the performance
11143 		 * penalty of locking the private state object and
11144 		 * allocating a new dc_state.
11145 		 */
11146 		if (state->async_update)
11147 			return 0;
11148 	}
11149 
11150 	/* Check scaling and underscan changes */
11151 	/* TODO Removed scaling changes validation due to inability to commit
11152 	 * new stream into context without causing a full reset. Need to
11153 	 * decide how to handle.
11154 	 */
11155 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11156 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11157 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11158 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11159 
11160 		/* Skip any modesets/resets */
11161 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11162 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11163 			continue;
11164 
11165 		/* Skip anything that is not a scaling or underscan change */
11166 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11167 			continue;
11168 
11169 		lock_and_validation_needed = true;
11170 	}
11171 
11172 #if defined(CONFIG_DRM_AMD_DC_DCN)
11173 	/* set the slot info for each mst_state based on the link encoding format */
11174 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11175 		struct amdgpu_dm_connector *aconnector;
11176 		struct drm_connector *connector;
11177 		struct drm_connector_list_iter iter;
11178 		u8 link_coding_cap;
11179 
11180 		if (!mgr->mst_state)
11181 			continue;
11182 
11183 		drm_connector_list_iter_begin(dev, &iter);
11184 		drm_for_each_connector_iter(connector, &iter) {
11185 			int id = connector->index;
11186 
11187 			if (id == mst_state->mgr->conn_base_id) {
11188 				aconnector = to_amdgpu_dm_connector(connector);
11189 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11190 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11191 
11192 				break;
11193 			}
11194 		}
11195 		drm_connector_list_iter_end(&iter);
11196 
11197 	}
11198 #endif
11199 	/*
11200 	 * Streams and planes are reset when there are changes that affect
11201 	 * bandwidth. Anything that affects bandwidth needs to go through
11202 	 * DC global validation to ensure that the configuration can be applied
11203 	 * to hardware.
11204 	 *
11205 	 * We currently have to stall out here in atomic_check for outstanding
11206 	 * commits to finish in this case because our IRQ handlers reference
11207 	 * DRM state directly - we can end up disabling interrupts too early
11208 	 * if we don't.
11209 	 *
11210 	 * TODO: Remove this stall and drop DM state private objects.
11211 	 */
11212 	if (lock_and_validation_needed) {
11213 		ret = dm_atomic_get_state(state, &dm_state);
11214 		if (ret) {
11215 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11216 			goto fail;
11217 		}
11218 
11219 		ret = do_aquire_global_lock(dev, state);
11220 		if (ret) {
11221 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11222 			goto fail;
11223 		}
11224 
11225 #if defined(CONFIG_DRM_AMD_DC_DCN)
11226 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11227 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
11228 			goto fail;
11229 		}
11230 
11231 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11232 		if (ret) {
11233 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11234 			goto fail;
11235 		}
11236 #endif
11237 
11238 		/*
11239 		 * Perform validation of MST topology in the state:
11240 		 * We need to perform MST atomic check before calling
11241 		 * dc_validate_global_state(), or there is a chance
11242 		 * to get stuck in an infinite loop and hang eventually.
11243 		 */
11244 		ret = drm_dp_mst_atomic_check(state);
11245 		if (ret) {
11246 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11247 			goto fail;
11248 		}
11249 		status = dc_validate_global_state(dc, dm_state->context, true);
11250 		if (status != DC_OK) {
11251 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11252 				       dc_status_to_str(status), status);
11253 			ret = -EINVAL;
11254 			goto fail;
11255 		}
11256 	} else {
11257 		/*
11258 		 * The commit is a fast update. Fast updates shouldn't change
11259 		 * the DC context, affect global validation, and can have their
11260 		 * commit work done in parallel with other commits not touching
11261 		 * the same resource. If we have a new DC context as part of
11262 		 * the DM atomic state from validation we need to free it and
11263 		 * retain the existing one instead.
11264 		 *
11265 		 * Furthermore, since the DM atomic state only contains the DC
11266 		 * context and can safely be annulled, we can free the state
11267 		 * and clear the associated private object now to free
11268 		 * some memory and avoid a possible use-after-free later.
11269 		 */
11270 
11271 		for (i = 0; i < state->num_private_objs; i++) {
11272 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11273 
11274 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11275 				int j = state->num_private_objs-1;
11276 
11277 				dm_atomic_destroy_state(obj,
11278 						state->private_objs[i].state);
11279 
11280 				/* If i is not at the end of the array then the
11281 				 * last element needs to be moved to where i was
11282 				 * before the array can safely be truncated.
11283 				 */
11284 				if (i != j)
11285 					state->private_objs[i] =
11286 						state->private_objs[j];
11287 
11288 				state->private_objs[j].ptr = NULL;
11289 				state->private_objs[j].state = NULL;
11290 				state->private_objs[j].old_state = NULL;
11291 				state->private_objs[j].new_state = NULL;
11292 
11293 				state->num_private_objs = j;
11294 				break;
11295 			}
11296 		}
11297 	}
11298 
11299 	/* Store the overall update type for use later in atomic check. */
11300 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11301 		struct dm_crtc_state *dm_new_crtc_state =
11302 			to_dm_crtc_state(new_crtc_state);
11303 
11304 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11305 							 UPDATE_TYPE_FULL :
11306 							 UPDATE_TYPE_FAST;
11307 	}
11308 
11309 	/* Must be success */
11310 	WARN_ON(ret);
11311 
11312 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11313 
11314 	return ret;
11315 
11316 fail:
11317 	if (ret == -EDEADLK)
11318 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11319 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11320 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11321 	else
11322 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11323 
11324 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11325 
11326 	return ret;
11327 }
11328 
11329 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11330 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11331 {
11332 	uint8_t dpcd_data;
11333 	bool capable = false;
11334 
11335 	if (amdgpu_dm_connector->dc_link &&
11336 		dm_helpers_dp_read_dpcd(
11337 				NULL,
11338 				amdgpu_dm_connector->dc_link,
11339 				DP_DOWN_STREAM_PORT_COUNT,
11340 				&dpcd_data,
11341 				sizeof(dpcd_data))) {
11342 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11343 	}
11344 
11345 	return capable;
11346 }
11347 
11348 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11349 		unsigned int offset,
11350 		unsigned int total_length,
11351 		uint8_t *data,
11352 		unsigned int length,
11353 		struct amdgpu_hdmi_vsdb_info *vsdb)
11354 {
11355 	bool res;
11356 	union dmub_rb_cmd cmd;
11357 	struct dmub_cmd_send_edid_cea *input;
11358 	struct dmub_cmd_edid_cea_output *output;
11359 
11360 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11361 		return false;
11362 
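	/*
	 * Pack this chunk of the CEA extension block into a DMUB EDID_CEA
	 * command; offset and cea_total_length let the firmware locate the
	 * chunk within the full block.
	 */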
11363 	memset(&cmd, 0, sizeof(cmd));
11364 
11365 	input = &cmd.edid_cea.data.input;
11366 
11367 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11368 	cmd.edid_cea.header.sub_type = 0;
11369 	cmd.edid_cea.header.payload_bytes =
11370 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11371 	input->offset = offset;
11372 	input->length = length;
11373 	input->cea_total_length = total_length;
11374 	memcpy(input->payload, data, length);
11375 
11376 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11377 	if (!res) {
11378 		DRM_ERROR("EDID CEA parser failed\n");
11379 		return false;
11380 	}
11381 
11382 	output = &cmd.edid_cea.data.output;
11383 
11384 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11385 		if (!output->ack.success) {
11386 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11387 					output->ack.offset);
11388 		}
11389 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11390 		if (!output->amd_vsdb.vsdb_found)
11391 			return false;
11392 
11393 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11394 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11395 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11396 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11397 	} else {
11398 		DRM_WARN("Unknown EDID CEA parser results\n");
11399 		return false;
11400 	}
11401 
11402 	return true;
11403 }
11404 
11405 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11406 		uint8_t *edid_ext, int len,
11407 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11408 {
11409 	int i;
11410 
11411 	/* send extension block to DMCU for parsing */
11412 	for (i = 0; i < len; i += 8) {
11413 		bool res;
11414 		int offset;
11415 
11416 		/* send 8 bytes at a time */
11417 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11418 			return false;
11419 
11420 		if (i+8 == len) {
11421 			/* EDID block sent completed, expect result */
11422 			int version, min_rate, max_rate;
11423 
11424 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11425 			if (res) {
11426 				/* amd vsdb found */
11427 				vsdb_info->freesync_supported = 1;
11428 				vsdb_info->amd_vsdb_version = version;
11429 				vsdb_info->min_refresh_rate_hz = min_rate;
11430 				vsdb_info->max_refresh_rate_hz = max_rate;
11431 				return true;
11432 			}
11433 			/* not amd vsdb */
11434 			return false;
11435 		}
11436 
11437 		/* check for ack */
11438 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11439 		if (!res)
11440 			return false;
11441 	}
11442 
11443 	return false;
11444 }
11445 
11446 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11447 		uint8_t *edid_ext, int len,
11448 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11449 {
11450 	int i;
11451 
11452 	/* send the extension block to DMUB for parsing */
11453 	for (i = 0; i < len; i += 8) {
11454 		/* send 8 bytes at a time */
11455 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11456 			return false;
11457 	}
11458 
11459 	return vsdb_info->freesync_supported;
11460 }
11461 
11462 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11463 		uint8_t *edid_ext, int len,
11464 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11465 {
11466 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11467 
11468 	if (adev->dm.dmub_srv)
11469 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11470 	else
11471 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11472 }
11473 
11474 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11475 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11476 {
11477 	uint8_t *edid_ext = NULL;
11478 	int i;
11479 	bool valid_vsdb_found = false;
11480 
11481 	/*----- drm_find_cea_extension() -----*/
11482 	/* No EDID or EDID extensions */
11483 	if (edid == NULL || edid->extensions == 0)
11484 		return -ENODEV;
11485 
11486 	/* Find CEA extension */
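	/*
	 * Each EDID block is EDID_LENGTH (128) bytes; extension block i
	 * starts at offset EDID_LENGTH * (i + 1) from the base block.
	 */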
11487 	for (i = 0; i < edid->extensions; i++) {
11488 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11489 		if (edid_ext[0] == CEA_EXT)
11490 			break;
11491 	}
11492 
11493 	if (i == edid->extensions)
11494 		return -ENODEV;
11495 
11496 	/*----- cea_db_offsets() -----*/
11497 	if (edid_ext[0] != CEA_EXT)
11498 		return -ENODEV;
11499 
11500 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11501 
11502 	return valid_vsdb_found ? i : -ENODEV;
11503 }
11504 
11505 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11506 					struct edid *edid)
11507 {
11508 	int i = 0;
11509 	struct detailed_timing *timing;
11510 	struct detailed_non_pixel *data;
11511 	struct detailed_data_monitor_range *range;
11512 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11513 			to_amdgpu_dm_connector(connector);
11514 	struct dm_connector_state *dm_con_state = NULL;
11515 	struct dc_sink *sink;
11516 
11517 	struct drm_device *dev = connector->dev;
11518 	struct amdgpu_device *adev = drm_to_adev(dev);
11519 	bool freesync_capable = false;
11520 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11521 
11522 	if (!connector->state) {
11523 		DRM_ERROR("%s - Connector has no state\n", __func__);
11524 		goto update;
11525 	}
11526 
11527 	sink = amdgpu_dm_connector->dc_sink ?
11528 		amdgpu_dm_connector->dc_sink :
11529 		amdgpu_dm_connector->dc_em_sink;
11530 
11531 	if (!edid || !sink) {
11532 		dm_con_state = to_dm_connector_state(connector->state);
11533 
11534 		amdgpu_dm_connector->min_vfreq = 0;
11535 		amdgpu_dm_connector->max_vfreq = 0;
11536 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11537 		connector->display_info.monitor_range.min_vfreq = 0;
11538 		connector->display_info.monitor_range.max_vfreq = 0;
11539 		freesync_capable = false;
11540 
11541 		goto update;
11542 	}
11543 
11544 	dm_con_state = to_dm_connector_state(connector->state);
11545 
11546 	if (!adev->dm.freesync_module)
11547 		goto update;
11548 
11549 
11550 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11551 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11552 		bool edid_check_required = false;
11553 
11554 		if (edid) {
11555 			edid_check_required = is_dp_capable_without_timing_msa(
11556 						adev->dm.dc,
11557 						amdgpu_dm_connector);
11558 		}
11559 
11560 		if (edid_check_required && (edid->version > 1 ||
11561 		   (edid->version == 1 && edid->revision > 1))) {
11562 			for (i = 0; i < 4; i++) {
11563 
11564 				timing	= &edid->detailed_timings[i];
11565 				data	= &timing->data.other_data;
11566 				range	= &data->data.range;
11567 				/*
11568 				 * Check if monitor has continuous frequency mode
11569 				 */
11570 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11571 					continue;
11572 				/*
11573 				 * Check for flag range limits only. If flag == 1 then
11574 				 * no additional timing information is provided.
11575 				 * Default GTF, GTF Secondary curve and CVT are not
11576 				 * supported.
11577 				 */
11578 				if (range->flags != 1)
11579 					continue;
11580 
11581 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11582 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11583 				amdgpu_dm_connector->pixel_clock_mhz =
11584 					range->pixel_clock_mhz * 10;
11585 
11586 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11587 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11588 
11589 				break;
11590 			}
11591 
11592 			if (amdgpu_dm_connector->max_vfreq -
11593 			    amdgpu_dm_connector->min_vfreq > 10) {
11594 
11595 				freesync_capable = true;
11596 			}
11597 		}
11598 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11599 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11600 		if (i >= 0 && vsdb_info.freesync_supported) {
11601 			timing  = &edid->detailed_timings[i];
11602 			data    = &timing->data.other_data;
11603 
11604 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11605 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11606 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11607 				freesync_capable = true;
11608 
11609 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11610 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11611 		}
11612 	}
11613 
11614 update:
11615 	if (dm_con_state)
11616 		dm_con_state->freesync_capable = freesync_capable;
11617 
11618 	if (connector->vrr_capable_property)
11619 		drm_connector_set_vrr_capable_property(connector,
11620 						       freesync_capable);
11621 }
11622 
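/**
 * amdgpu_dm_trigger_timing_sync - Re-apply the forced timing sync setting
 * @dev: DRM device
 *
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and retrigger CRTC synchronization.
 */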
11623 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11624 {
11625 	struct amdgpu_device *adev = drm_to_adev(dev);
11626 	struct dc *dc = adev->dm.dc;
11627 	int i;
11628 
11629 	mutex_lock(&adev->dm.dc_lock);
11630 	if (dc->current_state) {
11631 		for (i = 0; i < dc->current_state->stream_count; ++i)
11632 			dc->current_state->streams[i]
11633 				->triggered_crtc_reset.enabled =
11634 				adev->dm.force_timing_sync;
11635 
11636 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11637 		dc_trigger_sync(dc, dc->current_state);
11638 	}
11639 	mutex_unlock(&adev->dm.dc_lock);
11640 }
11641 
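/*
 * Register write helper used by the DC service layer: perform the MMIO write
 * through CGS and record it in the amdgpu_dc_wreg trace event.
 */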
11642 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11643 		       uint32_t value, const char *func_name)
11644 {
11645 #ifdef DM_CHECK_ADDR_0
11646 	if (address == 0) {
11647 		DC_ERR("invalid register write; address = 0\n");
11648 		return;
11649 	}
11650 #endif
11651 	cgs_write_register(ctx->cgs_device, address, value);
11652 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11653 }
11654 
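/*
 * Register read helper used by the DC service layer: perform the MMIO read
 * through CGS and record it in the amdgpu_dc_rreg trace event. A read issued
 * while a DMUB register-offload gather is in progress (and burst writes are
 * not expected) is rejected with an assert and returns 0.
 */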
11655 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11656 			  const char *func_name)
11657 {
11658 	uint32_t value;
11659 #ifdef DM_CHECK_ADDR_0
11660 	if (address == 0) {
11661 		DC_ERR("invalid register read; address = 0\n");
11662 		return 0;
11663 	}
11664 #endif
11665 
11666 	if (ctx->dmub_srv &&
11667 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11668 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11669 		ASSERT(false);
11670 		return 0;
11671 	}
11672 
11673 	value = cgs_read_register(ctx->cgs_device, address);
11674 
11675 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11676 
11677 	return value;
11678 }
11679 
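/*
 * Translate the status of a DMUB transfer into the caller-visible operation
 * result. For AUX transactions the return value is the reply length on
 * success; for SET_CONFIG it is 0. A return of -1 indicates failure.
 */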
11680 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11681 						struct dc_context *ctx,
11682 						uint8_t status_type,
11683 						uint32_t *operation_result)
11684 {
11685 	struct amdgpu_device *adev = ctx->driver_context;
11686 	int return_status = -1;
11687 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11688 
11689 	if (is_cmd_aux) {
11690 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11691 			return_status = p_notify->aux_reply.length;
11692 			*operation_result = p_notify->result;
11693 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11694 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11695 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11696 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11697 		} else {
11698 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11699 		}
11700 	} else {
11701 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11702 			return_status = 0;
11703 			*operation_result = p_notify->sc_status;
11704 		} else {
11705 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11706 		}
11707 	}
11708 
11709 	return return_status;
11710 }
11711 
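/*
 * Synchronous wrapper for the asynchronous DMUB AUX and SET_CONFIG
 * interfaces: submit the request, wait (up to 10 seconds) on the
 * dmub_aux_transfer_done completion, and report the outcome through
 * @operation_result.
 */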
11712 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11713 	unsigned int link_index, void *cmd_payload, void *operation_result)
11714 {
11715 	struct amdgpu_device *adev = ctx->driver_context;
11716 	int ret = 0;
11717 
11718 	if (is_cmd_aux) {
11719 		dc_process_dmub_aux_transfer_async(ctx->dc,
11720 			link_index, (struct aux_payload *)cmd_payload);
11721 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11722 					(struct set_config_cmd_payload *)cmd_payload,
11723 					adev->dm.dmub_notify)) {
11724 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11725 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11726 					(uint32_t *)operation_result);
11727 	}
11728 
11729 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11730 	if (ret == 0) {
11731 		DRM_ERROR("wait_for_completion_timeout timed out!");
11732 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11733 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11734 				(uint32_t *)operation_result);
11735 	}
11736 
11737 	if (is_cmd_aux) {
11738 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11739 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11740 
11741 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11742 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11743 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11744 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11745 				       adev->dm.dmub_notify->aux_reply.length);
11746 			}
11747 		}
11748 	}
11749 
11750 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11751 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11752 			(uint32_t *)operation_result);
11753 }
11754 
11755 /*
11756  * Check whether seamless boot is supported.
11757  *
11758  * So far we only support seamless boot on CHIP_VANGOGH.
11759  * If everything goes well, we may consider expanding
11760  * seamless boot to other ASICs.
11761  */
11762 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11763 {
11764 	switch (adev->asic_type) {
11765 	case CHIP_VANGOGH:
11766 		if (!adev->mman.keep_stolen_vga_memory)
11767 			return true;
11768 		break;
11769 	default:
11770 		break;
11771 	}
11772 
11773 	return false;
11774 }
11775