xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 5b723b12301272ed3c6c99c4ad8b43a520f880ea)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
119 
120 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
122 
123 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
124 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
125 
126 /* Number of bytes in PSP header for firmware. */
127 #define PSP_HEADER_BYTES 0x100
128 
129 /* Number of bytes in PSP footer for firmware. */
130 #define PSP_FOOTER_BYTES 0x100
131 
132 /**
133  * DOC: overview
134  *
135  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
136  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
137  * requests into DC requests, and DC responses into DRM responses.
138  *
139  * The root control structure is &struct amdgpu_display_manager.
140  */
141 
142 /* basic init/fini API */
143 static int amdgpu_dm_init(struct amdgpu_device *adev);
144 static void amdgpu_dm_fini(struct amdgpu_device *adev);
145 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
146 
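/* Map the dongle type reported in the link's DPCD caps to the DRM subconnector type. */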
147 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
148 {
149 	switch (link->dpcd_caps.dongle_type) {
150 	case DISPLAY_DONGLE_NONE:
151 		return DRM_MODE_SUBCONNECTOR_Native;
152 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
153 		return DRM_MODE_SUBCONNECTOR_VGA;
154 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
155 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
156 		return DRM_MODE_SUBCONNECTOR_DVID;
157 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
158 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
159 		return DRM_MODE_SUBCONNECTOR_HDMIA;
160 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
161 	default:
162 		return DRM_MODE_SUBCONNECTOR_Unknown;
163 	}
164 }
165 
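/*
 * Update the DP subconnector property on the connector to reflect the dongle
 * type of the currently attached sink, if any.
 */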
166 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
167 {
168 	struct dc_link *link = aconnector->dc_link;
169 	struct drm_connector *connector = &aconnector->base;
170 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
171 
172 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
173 		return;
174 
175 	if (aconnector->dc_sink)
176 		subconnector = get_subconnector_type(link);
177 
178 	drm_object_property_set_value(&connector->base,
179 			connector->dev->mode_config.dp_subconnector_property,
180 			subconnector);
181 }
182 
183 /*
184  * initializes drm_device display related structures, based on the information
185  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
186  * drm_encoder, drm_mode_config
187  *
188  * Returns 0 on success
189  */
190 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
191 /* removes and deallocates the drm structures, created by the above function */
192 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
193 
194 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
195 				struct drm_plane *plane,
196 				unsigned long possible_crtcs,
197 				const struct dc_plane_cap *plane_cap);
198 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
199 			       struct drm_plane *plane,
200 			       uint32_t link_index);
201 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
202 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
203 				    uint32_t link_index,
204 				    struct amdgpu_encoder *amdgpu_encoder);
205 static int amdgpu_dm_encoder_init(struct drm_device *dev,
206 				  struct amdgpu_encoder *aencoder,
207 				  uint32_t link_index);
208 
209 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
210 
211 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
212 
213 static int amdgpu_dm_atomic_check(struct drm_device *dev,
214 				  struct drm_atomic_state *state);
215 
216 static void handle_cursor_update(struct drm_plane *plane,
217 				 struct drm_plane_state *old_plane_state);
218 
219 static const struct drm_format_info *
220 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
221 
222 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
223 static void handle_hpd_rx_irq(void *param);
224 
225 static bool
226 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
227 				 struct drm_crtc_state *new_crtc_state);
228 /*
229  * dm_vblank_get_counter
230  *
231  * @brief
232  * Get counter for number of vertical blanks
233  *
234  * @param
235  * struct amdgpu_device *adev - [in] desired amdgpu device
236  * int crtc - [in] index of the CRTC to get the counter from
237  *
238  * @return
239  * Counter for vertical blanks
240  */
241 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
242 {
243 	if (crtc >= adev->mode_info.num_crtc)
244 		return 0;
245 	else {
246 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
247 
248 		if (acrtc->dm_irq_params.stream == NULL) {
249 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
250 				  crtc);
251 			return 0;
252 		}
253 
254 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
255 	}
256 }
257 
258 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
259 				  u32 *vbl, u32 *position)
260 {
261 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
262 
263 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
264 		return -EINVAL;
265 	else {
266 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
267 
268 		if (acrtc->dm_irq_params.stream ==  NULL) {
269 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
270 				  crtc);
271 			return 0;
272 		}
273 
274 		/*
275 		 * TODO rework base driver to use values directly.
276 		 * for now parse it back into reg-format
277 		 */
278 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
279 					 &v_blank_start,
280 					 &v_blank_end,
281 					 &h_position,
282 					 &v_position);
283 
284 		*position = v_position | (h_position << 16);
285 		*vbl = v_blank_start | (v_blank_end << 16);
286 	}
287 
288 	return 0;
289 }
290 
291 static bool dm_is_idle(void *handle)
292 {
293 	/* XXX todo */
294 	return true;
295 }
296 
297 static int dm_wait_for_idle(void *handle)
298 {
299 	/* XXX todo */
300 	return 0;
301 }
302 
303 static bool dm_check_soft_reset(void *handle)
304 {
305 	return false;
306 }
307 
308 static int dm_soft_reset(void *handle)
309 {
310 	/* XXX todo */
311 	return 0;
312 }
313 
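/* Find the amdgpu_crtc driven by the given OTG instance. */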
314 static struct amdgpu_crtc *
315 get_crtc_by_otg_inst(struct amdgpu_device *adev,
316 		     int otg_inst)
317 {
318 	struct drm_device *dev = adev_to_drm(adev);
319 	struct drm_crtc *crtc;
320 	struct amdgpu_crtc *amdgpu_crtc;
321 
322 	if (WARN_ON(otg_inst == -1))
323 		return adev->mode_info.crtcs[0];
324 
325 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
326 		amdgpu_crtc = to_amdgpu_crtc(crtc);
327 
328 		if (amdgpu_crtc->otg_inst == otg_inst)
329 			return amdgpu_crtc;
330 	}
331 
332 	return NULL;
333 }
334 
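/*
 * Check whether VRR is active from IRQ context, using the freesync state
 * cached in the CRTC's irq params.
 */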
335 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
336 {
337 	return acrtc->dm_irq_params.freesync_config.state ==
338 		       VRR_STATE_ACTIVE_VARIABLE ||
339 	       acrtc->dm_irq_params.freesync_config.state ==
340 		       VRR_STATE_ACTIVE_FIXED;
341 }
342 
343 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
344 {
345 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
346 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
347 }
348 
349 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
350 					      struct dm_crtc_state *new_state)
351 {
352 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
353 		return true;
354 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
355 		return true;
356 	else
357 		return false;
358 }
359 
360 /**
361  * dm_pflip_high_irq() - Handle pageflip interrupt
362  * @interrupt_params: ignored
363  *
364  * Handles the pageflip interrupt by notifying all interested parties
365  * that the pageflip has been completed.
366  */
367 static void dm_pflip_high_irq(void *interrupt_params)
368 {
369 	struct amdgpu_crtc *amdgpu_crtc;
370 	struct common_irq_params *irq_params = interrupt_params;
371 	struct amdgpu_device *adev = irq_params->adev;
372 	unsigned long flags;
373 	struct drm_pending_vblank_event *e;
374 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
375 	bool vrr_active;
376 
377 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
378 
379 	/* IRQ could occur when in initial stage */
380 	/* TODO work and BO cleanup */
381 	if (amdgpu_crtc == NULL) {
382 		DC_LOG_PFLIP("CRTC is null, returning.\n");
383 		return;
384 	}
385 
386 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
387 
388 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
389 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
390 						 amdgpu_crtc->pflip_status,
391 						 AMDGPU_FLIP_SUBMITTED,
392 						 amdgpu_crtc->crtc_id,
393 						 amdgpu_crtc);
394 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
395 		return;
396 	}
397 
398 	/* page flip completed. */
399 	e = amdgpu_crtc->event;
400 	amdgpu_crtc->event = NULL;
401 
402 	WARN_ON(!e);
403 
404 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
405 
406 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
407 	if (!vrr_active ||
408 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
409 				      &v_blank_end, &hpos, &vpos) ||
410 	    (vpos < v_blank_start)) {
411 		/* Update to correct count and vblank timestamp if racing with
412 		 * vblank irq. This also updates to the correct vblank timestamp
413 		 * even in VRR mode, as scanout is past the front-porch atm.
414 		 */
415 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
416 
417 		/* Wake up userspace by sending the pageflip event with proper
418 		 * count and timestamp of vblank of flip completion.
419 		 */
420 		if (e) {
421 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
422 
423 			/* Event sent, so done with vblank for this flip */
424 			drm_crtc_vblank_put(&amdgpu_crtc->base);
425 		}
426 	} else if (e) {
427 		/* VRR active and inside front-porch: vblank count and
428 		 * timestamp for pageflip event will only be up to date after
429 		 * drm_crtc_handle_vblank() has been executed from late vblank
430 		 * irq handler after start of back-porch (vline 0). We queue the
431 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
432 		 * updated timestamp and count, once it runs after us.
433 		 *
434 		 * We need to open-code this instead of using the helper
435 		 * drm_crtc_arm_vblank_event(), as that helper would
436 		 * call drm_crtc_accurate_vblank_count(), which we must
437 		 * not call in VRR mode while we are in front-porch!
438 		 */
439 
440 		/* sequence will be replaced by real count during send-out. */
441 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
442 		e->pipe = amdgpu_crtc->crtc_id;
443 
444 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
445 		e = NULL;
446 	}
447 
448 	/* Keep track of the vblank of this flip for flip throttling. We use
449 	 * the cooked hw counter, as that one is incremented at the start of
450 	 * this vblank of pageflip completion, so last_flip_vblank is the
451 	 * forbidden count for queueing new pageflips if vsync + VRR is enabled.
452 	 */
453 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
454 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
455 
456 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
457 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
458 
459 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
460 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
461 		     vrr_active, (int) !e);
462 }
463 
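/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling (and BTR processing for pre-DCE12 ASICs) after the end of
 * the front-porch.
 */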
464 static void dm_vupdate_high_irq(void *interrupt_params)
465 {
466 	struct common_irq_params *irq_params = interrupt_params;
467 	struct amdgpu_device *adev = irq_params->adev;
468 	struct amdgpu_crtc *acrtc;
469 	struct drm_device *drm_dev;
470 	struct drm_vblank_crtc *vblank;
471 	ktime_t frame_duration_ns, previous_timestamp;
472 	unsigned long flags;
473 	int vrr_active;
474 
475 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
476 
477 	if (acrtc) {
478 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
479 		drm_dev = acrtc->base.dev;
480 		vblank = &drm_dev->vblank[acrtc->base.index];
481 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
482 		frame_duration_ns = vblank->time - previous_timestamp;
483 
484 		if (frame_duration_ns > 0) {
485 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
486 						frame_duration_ns,
487 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
488 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
489 		}
490 
491 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
492 			      acrtc->crtc_id,
493 			      vrr_active);
494 
495 		/* Core vblank handling is done here after the end of front-porch in
496 		 * vrr mode, as vblank timestamping will give valid results now
497 		 * that it is done after the front-porch. This will also deliver
498 		 * page-flip completion events that have been queued to us
499 		 * if a pageflip happened inside the front-porch.
500 		 */
501 		if (vrr_active) {
502 			drm_crtc_handle_vblank(&acrtc->base);
503 
504 			/* BTR processing for pre-DCE12 ASICs */
505 			if (acrtc->dm_irq_params.stream &&
506 			    adev->family < AMDGPU_FAMILY_AI) {
507 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
508 				mod_freesync_handle_v_update(
509 				    adev->dm.freesync_module,
510 				    acrtc->dm_irq_params.stream,
511 				    &acrtc->dm_irq_params.vrr_params);
512 
513 				dc_stream_adjust_vmin_vmax(
514 				    adev->dm.dc,
515 				    acrtc->dm_irq_params.stream,
516 				    &acrtc->dm_irq_params.vrr_params.adjust);
517 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
518 			}
519 		}
520 	}
521 }
522 
523 /**
524  * dm_crtc_high_irq() - Handles CRTC interrupt
525  * @interrupt_params: used for determining the CRTC instance
526  *
527  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
528  * event handler.
529  */
530 static void dm_crtc_high_irq(void *interrupt_params)
531 {
532 	struct common_irq_params *irq_params = interrupt_params;
533 	struct amdgpu_device *adev = irq_params->adev;
534 	struct amdgpu_crtc *acrtc;
535 	unsigned long flags;
536 	int vrr_active;
537 
538 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
539 	if (!acrtc)
540 		return;
541 
542 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
543 
544 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
545 		      vrr_active, acrtc->dm_irq_params.active_planes);
546 
547 	/*
548 	 * Core vblank handling at the start of front-porch is only possible
549 	 * in non-vrr mode, as only then does vblank timestamping give
550 	 * valid results while done in the front-porch. Otherwise defer it
551 	 * to dm_vupdate_high_irq after the end of front-porch.
552 	 */
553 	if (!vrr_active)
554 		drm_crtc_handle_vblank(&acrtc->base);
555 
556 	/*
557 	 * The following must happen at the start of vblank, for crc
558 	 * computation and below-the-range btr support in vrr mode.
559 	 */
560 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
561 
562 	/* BTR updates need to happen before VUPDATE on Vega and above. */
563 	if (adev->family < AMDGPU_FAMILY_AI)
564 		return;
565 
566 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
567 
568 	if (acrtc->dm_irq_params.stream &&
569 	    acrtc->dm_irq_params.vrr_params.supported &&
570 	    acrtc->dm_irq_params.freesync_config.state ==
571 		    VRR_STATE_ACTIVE_VARIABLE) {
572 		mod_freesync_handle_v_update(adev->dm.freesync_module,
573 					     acrtc->dm_irq_params.stream,
574 					     &acrtc->dm_irq_params.vrr_params);
575 
576 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
577 					   &acrtc->dm_irq_params.vrr_params.adjust);
578 	}
579 
580 	/*
581 	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
582 	 * In that case, pageflip completion interrupts won't fire and pageflip
583 	 * completion events won't get delivered. Prevent this by sending
584 	 * pending pageflip events from here if a flip is still pending.
585 	 *
586 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
587 	 * avoid race conditions between flip programming and completion,
588 	 * which could cause too early flip completion events.
589 	 */
590 	if (adev->family >= AMDGPU_FAMILY_RV &&
591 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
592 	    acrtc->dm_irq_params.active_planes == 0) {
593 		if (acrtc->event) {
594 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
595 			acrtc->event = NULL;
596 			drm_crtc_vblank_put(&acrtc->base);
597 		}
598 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
599 	}
600 
601 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
602 }
603 
604 #if defined(CONFIG_DRM_AMD_DC_DCN)
605 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
606 /**
607  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
608  * DCN generation ASICs
609  * @interrupt_params: interrupt parameters
610  *
611  * Used to set crc window/read out crc value at vertical line 0 position
612  */
613 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
614 {
615 	struct common_irq_params *irq_params = interrupt_params;
616 	struct amdgpu_device *adev = irq_params->adev;
617 	struct amdgpu_crtc *acrtc;
618 
619 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620 
621 	if (!acrtc)
622 		return;
623 
624 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
625 }
626 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
627 
628 /**
629  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
630  * @adev: amdgpu_device pointer
631  * @notify: dmub notification structure
632  *
633  * Dmub AUX or SET_CONFIG command completion processing callback.
634  * Copies the dmub notification to DM, which is to be read by the AUX command
635  * issuing thread, and also signals the event to wake up the thread.
636  */
637 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
638 					struct dmub_notification *notify)
639 {
640 	if (adev->dm.dmub_notify)
641 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
642 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
643 		complete(&adev->dm.dmub_aux_transfer_done);
644 }
645 
646 /**
647  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
648  * @adev: amdgpu_device pointer
649  * @notify: dmub notification structure
650  *
651  * Dmub Hpd interrupt processing callback. Gets the display index through the
652  * link index and calls the helper to do the processing.
653  */
654 static void dmub_hpd_callback(struct amdgpu_device *adev,
655 			      struct dmub_notification *notify)
656 {
657 	struct amdgpu_dm_connector *aconnector;
658 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
659 	struct drm_connector *connector;
660 	struct drm_connector_list_iter iter;
661 	struct dc_link *link;
662 	uint8_t link_index = 0;
663 	struct drm_device *dev;
664 
665 	if (adev == NULL)
666 		return;
667 
668 	if (notify == NULL) {
669 		DRM_ERROR("DMUB HPD callback notification was NULL");
670 		return;
671 	}
672 
673 	if (notify->link_index >= adev->dm.dc->link_count) {
674 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
675 		return;
676 	}
677 
678 	link_index = notify->link_index;
679 	link = adev->dm.dc->links[link_index];
680 	dev = adev->dm.ddev;
681 
682 	drm_connector_list_iter_begin(dev, &iter);
683 	drm_for_each_connector_iter(connector, &iter) {
684 		aconnector = to_amdgpu_dm_connector(connector);
685 		if (link && aconnector->dc_link == link) {
686 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
687 			hpd_aconnector = aconnector;
688 			break;
689 		}
690 	}
691 	drm_connector_list_iter_end(&iter);
692 
693 	if (hpd_aconnector) {
694 		if (notify->type == DMUB_NOTIFICATION_HPD)
695 			handle_hpd_irq_helper(hpd_aconnector);
696 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
697 			handle_hpd_rx_irq(hpd_aconnector);
698 	}
699 }
700 
701 /**
702  * register_dmub_notify_callback - Sets callback for DMUB notify
703  * @adev: amdgpu_device pointer
704  * @type: Type of dmub notification
705  * @callback: Dmub interrupt callback function
706  * @dmub_int_thread_offload: offload indicator
707  *
708  * API to register a dmub callback handler for a dmub notification.
709  * Also sets an indicator whether the callback processing is to be offloaded
710  * to the dmub interrupt handling thread.
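 *
 * For example, as used later in this file from amdgpu_dm_init():
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				   dmub_hpd_callback, true);
 *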
711  * Return: true if successfully registered, false if there is existing registration
712  */
713 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
714 					  enum dmub_notification_type type,
715 					  dmub_notify_interrupt_callback_t callback,
716 					  bool dmub_int_thread_offload)
717 {
718 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
719 		adev->dm.dmub_callback[type] = callback;
720 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
721 	} else
722 		return false;
723 
724 	return true;
725 }
726 
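/*
 * Deferred work that forwards an offloaded DMUB notification to the callback
 * registered for its notification type.
 */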
727 static void dm_handle_hpd_work(struct work_struct *work)
728 {
729 	struct dmub_hpd_work *dmub_hpd_wrk;
730 
731 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
732 
733 	if (!dmub_hpd_wrk->dmub_notify) {
734 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
735 		return;
736 	}
737 
738 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
739 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
740 		dmub_hpd_wrk->dmub_notify);
741 	}
742 
743 	kfree(dmub_hpd_wrk->dmub_notify);
744 	kfree(dmub_hpd_wrk);
745 
746 }
747 
748 #define DMUB_TRACE_MAX_READ 64
749 /**
750  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
751  * @interrupt_params: used for determining the Outbox instance
752  *
753  * Handles the Outbox interrupt by reading DMUB notifications and trace-buffer
754  * entries and dispatching them to the registered handlers.
755  */
756 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
757 {
758 	struct dmub_notification notify;
759 	struct common_irq_params *irq_params = interrupt_params;
760 	struct amdgpu_device *adev = irq_params->adev;
761 	struct amdgpu_display_manager *dm = &adev->dm;
762 	struct dmcub_trace_buf_entry entry = { 0 };
763 	uint32_t count = 0;
764 	struct dmub_hpd_work *dmub_hpd_wrk;
765 	struct dc_link *plink = NULL;
766 
767 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
768 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
769 
770 		do {
771 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
772 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
773 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
774 				continue;
775 			}
776 			if (!dm->dmub_callback[notify.type]) {
777 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
778 				continue;
779 			}
780 			if (dm->dmub_thread_offload[notify.type] == true) {
781 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
782 				if (!dmub_hpd_wrk) {
783 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
784 					return;
785 				}
786 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
787 				if (!dmub_hpd_wrk->dmub_notify) {
788 					kfree(dmub_hpd_wrk);
789 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
790 					return;
791 				}
792 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
793 				if (dmub_hpd_wrk->dmub_notify)
794 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
795 				dmub_hpd_wrk->adev = adev;
796 				if (notify.type == DMUB_NOTIFICATION_HPD) {
797 					plink = adev->dm.dc->links[notify.link_index];
798 					if (plink) {
799 						plink->hpd_status =
800 							notify.hpd_status == DP_HPD_PLUG;
801 					}
802 				}
803 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
804 			} else {
805 				dm->dmub_callback[notify.type](adev, &notify);
806 			}
807 		} while (notify.pending_notification);
808 	}
809 
810 
811 	do {
812 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
813 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
814 							entry.param0, entry.param1);
815 
816 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
817 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
818 		} else
819 			break;
820 
821 		count++;
822 
823 	} while (count <= DMUB_TRACE_MAX_READ);
824 
825 	if (count > DMUB_TRACE_MAX_READ)
826 		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
827 }
828 #endif /* CONFIG_DRM_AMD_DC_DCN */
829 
830 static int dm_set_clockgating_state(void *handle,
831 		  enum amd_clockgating_state state)
832 {
833 	return 0;
834 }
835 
836 static int dm_set_powergating_state(void *handle,
837 		  enum amd_powergating_state state)
838 {
839 	return 0;
840 }
841 
842 /* Prototypes of private functions */
843 static int dm_early_init(void *handle);
844 
845 /* Allocate memory for FBC compressed data  */
846 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
847 {
848 	struct drm_device *dev = connector->dev;
849 	struct amdgpu_device *adev = drm_to_adev(dev);
850 	struct dm_compressor_info *compressor = &adev->dm.compressor;
851 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
852 	struct drm_display_mode *mode;
853 	unsigned long max_size = 0;
854 
855 	if (adev->dm.dc->fbc_compressor == NULL)
856 		return;
857 
858 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
859 		return;
860 
861 	if (compressor->bo_ptr)
862 		return;
863 
864 
865 	list_for_each_entry(mode, &connector->modes, head) {
866 		if (max_size < mode->htotal * mode->vtotal)
867 			max_size = mode->htotal * mode->vtotal;
868 	}
869 
870 	if (max_size) {
871 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
872 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
873 			    &compressor->gpu_addr, &compressor->cpu_addr);
874 
875 		if (r)
876 			DRM_ERROR("DM: Failed to initialize FBC\n");
877 		else {
878 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
879 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
880 		}
881 
882 	}
883 
884 }
885 
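/*
 * HDA audio component callback: return the ELD for the connector whose audio
 * pin matches the given port and report whether it is enabled.
 */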
886 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
887 					  int pipe, bool *enabled,
888 					  unsigned char *buf, int max_bytes)
889 {
890 	struct drm_device *dev = dev_get_drvdata(kdev);
891 	struct amdgpu_device *adev = drm_to_adev(dev);
892 	struct drm_connector *connector;
893 	struct drm_connector_list_iter conn_iter;
894 	struct amdgpu_dm_connector *aconnector;
895 	int ret = 0;
896 
897 	*enabled = false;
898 
899 	mutex_lock(&adev->dm.audio_lock);
900 
901 	drm_connector_list_iter_begin(dev, &conn_iter);
902 	drm_for_each_connector_iter(connector, &conn_iter) {
903 		aconnector = to_amdgpu_dm_connector(connector);
904 		if (aconnector->audio_inst != port)
905 			continue;
906 
907 		*enabled = true;
908 		ret = drm_eld_size(connector->eld);
909 		memcpy(buf, connector->eld, min(max_bytes, ret));
910 
911 		break;
912 	}
913 	drm_connector_list_iter_end(&conn_iter);
914 
915 	mutex_unlock(&adev->dm.audio_lock);
916 
917 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
918 
919 	return ret;
920 }
921 
922 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
923 	.get_eld = amdgpu_dm_audio_component_get_eld,
924 };
925 
926 static int amdgpu_dm_audio_component_bind(struct device *kdev,
927 				       struct device *hda_kdev, void *data)
928 {
929 	struct drm_device *dev = dev_get_drvdata(kdev);
930 	struct amdgpu_device *adev = drm_to_adev(dev);
931 	struct drm_audio_component *acomp = data;
932 
933 	acomp->ops = &amdgpu_dm_audio_component_ops;
934 	acomp->dev = kdev;
935 	adev->dm.audio_component = acomp;
936 
937 	return 0;
938 }
939 
940 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
941 					  struct device *hda_kdev, void *data)
942 {
943 	struct drm_device *dev = dev_get_drvdata(kdev);
944 	struct amdgpu_device *adev = drm_to_adev(dev);
945 	struct drm_audio_component *acomp = data;
946 
947 	acomp->ops = NULL;
948 	acomp->dev = NULL;
949 	adev->dm.audio_component = NULL;
950 }
951 
952 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
953 	.bind	= amdgpu_dm_audio_component_bind,
954 	.unbind	= amdgpu_dm_audio_component_unbind,
955 };
956 
957 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
958 {
959 	int i, ret;
960 
961 	if (!amdgpu_audio)
962 		return 0;
963 
964 	adev->mode_info.audio.enabled = true;
965 
966 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
967 
968 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
969 		adev->mode_info.audio.pin[i].channels = -1;
970 		adev->mode_info.audio.pin[i].rate = -1;
971 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
972 		adev->mode_info.audio.pin[i].status_bits = 0;
973 		adev->mode_info.audio.pin[i].category_code = 0;
974 		adev->mode_info.audio.pin[i].connected = false;
975 		adev->mode_info.audio.pin[i].id =
976 			adev->dm.dc->res_pool->audios[i]->inst;
977 		adev->mode_info.audio.pin[i].offset = 0;
978 	}
979 
980 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
981 	if (ret < 0)
982 		return ret;
983 
984 	adev->dm.audio_registered = true;
985 
986 	return 0;
987 }
988 
989 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
990 {
991 	if (!amdgpu_audio)
992 		return;
993 
994 	if (!adev->mode_info.audio.enabled)
995 		return;
996 
997 	if (adev->dm.audio_registered) {
998 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
999 		adev->dm.audio_registered = false;
1000 	}
1001 
1002 	/* TODO: Disable audio? */
1003 
1004 	adev->mode_info.audio.enabled = false;
1005 }
1006 
1007 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1008 {
1009 	struct drm_audio_component *acomp = adev->dm.audio_component;
1010 
1011 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1012 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1013 
1014 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1015 						 pin, -1);
1016 	}
1017 }
1018 
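/*
 * Load the DMUB firmware and VBIOS into the reserved framebuffer regions,
 * program the hardware parameters and start the DMCUB. Returns 0 if DMUB is
 * not supported or on success, and a negative error code otherwise.
 */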
1019 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1020 {
1021 	const struct dmcub_firmware_header_v1_0 *hdr;
1022 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1023 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1024 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1025 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1026 	struct abm *abm = adev->dm.dc->res_pool->abm;
1027 	struct dmub_srv_hw_params hw_params;
1028 	enum dmub_status status;
1029 	const unsigned char *fw_inst_const, *fw_bss_data;
1030 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1031 	bool has_hw_support;
1032 
1033 	if (!dmub_srv)
1034 		/* DMUB isn't supported on the ASIC. */
1035 		return 0;
1036 
1037 	if (!fb_info) {
1038 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1039 		return -EINVAL;
1040 	}
1041 
1042 	if (!dmub_fw) {
1043 		/* Firmware required for DMUB support. */
1044 		DRM_ERROR("No firmware provided for DMUB.\n");
1045 		return -EINVAL;
1046 	}
1047 
1048 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1049 	if (status != DMUB_STATUS_OK) {
1050 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1051 		return -EINVAL;
1052 	}
1053 
1054 	if (!has_hw_support) {
1055 		DRM_INFO("DMUB unsupported on ASIC\n");
1056 		return 0;
1057 	}
1058 
1059 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1060 	status = dmub_srv_hw_reset(dmub_srv);
1061 	if (status != DMUB_STATUS_OK)
1062 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1063 
1064 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1065 
1066 	fw_inst_const = dmub_fw->data +
1067 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1068 			PSP_HEADER_BYTES;
1069 
1070 	fw_bss_data = dmub_fw->data +
1071 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1072 		      le32_to_cpu(hdr->inst_const_bytes);
1073 
1074 	/* Copy firmware and bios info into FB memory. */
1075 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1076 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1077 
1078 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1079 
1080 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1081 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1082 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1083 	 * will be done by dm_dmub_hw_init
1084 	 */
1085 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1086 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1087 				fw_inst_const_size);
1088 	}
1089 
1090 	if (fw_bss_data_size)
1091 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1092 		       fw_bss_data, fw_bss_data_size);
1093 
1094 	/* Copy firmware bios info into FB memory. */
1095 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1096 	       adev->bios_size);
1097 
1098 	/* Reset regions that need to be reset. */
1099 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1100 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1101 
1102 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1103 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1104 
1105 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1106 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1107 
1108 	/* Initialize hardware. */
1109 	memset(&hw_params, 0, sizeof(hw_params));
1110 	hw_params.fb_base = adev->gmc.fb_start;
1111 	hw_params.fb_offset = adev->gmc.aper_base;
1112 
1113 	/* backdoor load firmware and trigger dmub running */
1114 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1115 		hw_params.load_inst_const = true;
1116 
1117 	if (dmcu)
1118 		hw_params.psp_version = dmcu->psp_version;
1119 
1120 	for (i = 0; i < fb_info->num_fb; ++i)
1121 		hw_params.fb[i] = &fb_info->fb[i];
1122 
1123 	switch (adev->ip_versions[DCE_HWIP][0]) {
1124 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1125 		hw_params.dpia_supported = true;
1126 #if defined(CONFIG_DRM_AMD_DC_DCN)
1127 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1128 #endif
1129 		break;
1130 	default:
1131 		break;
1132 	}
1133 
1134 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1135 	if (status != DMUB_STATUS_OK) {
1136 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1137 		return -EINVAL;
1138 	}
1139 
1140 	/* Wait for firmware load to finish. */
1141 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1142 	if (status != DMUB_STATUS_OK)
1143 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1144 
1145 	/* Init DMCU and ABM if available. */
1146 	if (dmcu && abm) {
1147 		dmcu->funcs->dmcu_init(dmcu);
1148 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1149 	}
1150 
1151 	if (!adev->dm.dc->ctx->dmub_srv)
1152 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1153 	if (!adev->dm.dc->ctx->dmub_srv) {
1154 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1155 		return -ENOMEM;
1156 	}
1157 
1158 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1159 		 adev->dm.dmcub_fw_version);
1160 
1161 	return 0;
1162 }
1163 
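/*
 * On resume, either just wait for the DMUB firmware auto-load if the hardware
 * is already initialized, or redo the full hardware init otherwise.
 */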
1164 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1165 {
1166 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1167 	enum dmub_status status;
1168 	bool init;
1169 
1170 	if (!dmub_srv) {
1171 		/* DMUB isn't supported on the ASIC. */
1172 		return;
1173 	}
1174 
1175 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1176 	if (status != DMUB_STATUS_OK)
1177 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1178 
1179 	if (status == DMUB_STATUS_OK && init) {
1180 		/* Wait for firmware load to finish. */
1181 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1182 		if (status != DMUB_STATUS_OK)
1183 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1184 	} else {
1185 		/* Perform the full hardware initialization. */
1186 		dm_dmub_hw_init(adev);
1187 	}
1188 }
1189 
1190 #if defined(CONFIG_DRM_AMD_DC_DCN)
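/*
 * Fill a dc_phy_addr_space_config from the GMC settings: system aperture,
 * AGP window and GART page table location.
 */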
1191 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1192 {
1193 	uint64_t pt_base;
1194 	uint32_t logical_addr_low;
1195 	uint32_t logical_addr_high;
1196 	uint32_t agp_base, agp_bot, agp_top;
1197 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1198 
1199 	memset(pa_config, 0, sizeof(*pa_config));
1200 
1201 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1202 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1203 
1204 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1205 		/*
1206 		 * Raven2 has a HW issue that makes it unable to use vram which
1207 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is a
1208 		 * workaround that increases the system aperture high address
1209 		 * (by 1) to get rid of the VM fault and hardware hang.
1210 		 */
1211 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1212 	else
1213 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1214 
1215 	agp_base = 0;
1216 	agp_bot = adev->gmc.agp_start >> 24;
1217 	agp_top = adev->gmc.agp_end >> 24;
1218 
1219 
1220 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1221 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1222 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1223 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1224 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1225 	page_table_base.low_part = lower_32_bits(pt_base);
1226 
1227 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1228 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1229 
1230 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1231 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1232 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1233 
1234 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1235 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1236 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1237 
1238 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1239 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1240 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1241 
1242 	pa_config->is_hvm_enabled = 0;
1243 
1244 }
1245 #endif
1246 #if defined(CONFIG_DRM_AMD_DC_DCN)
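/*
 * Deferred work that counts CRTCs with vblank interrupts enabled, allows DC
 * idle optimizations (MALL) only when that count is zero, and toggles PSR
 * according to the vblank requirements from the OS.
 */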
1247 static void vblank_control_worker(struct work_struct *work)
1248 {
1249 	struct vblank_control_work *vblank_work =
1250 		container_of(work, struct vblank_control_work, work);
1251 	struct amdgpu_display_manager *dm = vblank_work->dm;
1252 
1253 	mutex_lock(&dm->dc_lock);
1254 
1255 	if (vblank_work->enable)
1256 		dm->active_vblank_irq_count++;
1257 	else if (dm->active_vblank_irq_count)
1258 		dm->active_vblank_irq_count--;
1259 
1260 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1261 
1262 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1263 
1264 	/* Control PSR based on vblank requirements from OS */
1265 	if (vblank_work->stream && vblank_work->stream->link) {
1266 		if (vblank_work->enable) {
1267 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1268 				amdgpu_dm_psr_disable(vblank_work->stream);
1269 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1270 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1271 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1272 			amdgpu_dm_psr_enable(vblank_work->stream);
1273 		}
1274 	}
1275 
1276 	mutex_unlock(&dm->dc_lock);
1277 
1278 	dc_stream_release(vblank_work->stream);
1279 
1280 	kfree(vblank_work);
1281 }
1282 
1283 #endif
1284 
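/*
 * Deferred HPD RX IRQ handler: re-detects the sink and, unless a GPU reset is
 * in progress, services automated test requests or handles link loss.
 */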
1285 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1286 {
1287 	struct hpd_rx_irq_offload_work *offload_work;
1288 	struct amdgpu_dm_connector *aconnector;
1289 	struct dc_link *dc_link;
1290 	struct amdgpu_device *adev;
1291 	enum dc_connection_type new_connection_type = dc_connection_none;
1292 	unsigned long flags;
1293 
1294 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1295 	aconnector = offload_work->offload_wq->aconnector;
1296 
1297 	if (!aconnector) {
1298 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1299 		goto skip;
1300 	}
1301 
1302 	adev = drm_to_adev(aconnector->base.dev);
1303 	dc_link = aconnector->dc_link;
1304 
1305 	mutex_lock(&aconnector->hpd_lock);
1306 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1307 		DRM_ERROR("KMS: Failed to detect connector\n");
1308 	mutex_unlock(&aconnector->hpd_lock);
1309 
1310 	if (new_connection_type == dc_connection_none)
1311 		goto skip;
1312 
1313 	if (amdgpu_in_reset(adev))
1314 		goto skip;
1315 
1316 	mutex_lock(&adev->dm.dc_lock);
1317 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1318 		dc_link_dp_handle_automated_test(dc_link);
1319 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1320 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1321 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1322 		dc_link_dp_handle_link_loss(dc_link);
1323 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1324 		offload_work->offload_wq->is_handling_link_loss = false;
1325 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1326 	}
1327 	mutex_unlock(&adev->dm.dc_lock);
1328 
1329 skip:
1330 	kfree(offload_work);
1331 
1332 }
1333 
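/*
 * Allocate one single-threaded HPD RX IRQ offload workqueue per link, up to
 * the maximum number of links supported by DC.
 */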
1334 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1335 {
1336 	int max_caps = dc->caps.max_links;
1337 	int i = 0;
1338 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1339 
1340 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1341 
1342 	if (!hpd_rx_offload_wq)
1343 		return NULL;
1344 
1345 
1346 	for (i = 0; i < max_caps; i++) {
1347 		hpd_rx_offload_wq[i].wq =
1348 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1349 
1350 		if (hpd_rx_offload_wq[i].wq == NULL) {
1351 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1352 			return NULL;
1353 		}
1354 
1355 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1356 	}
1357 
1358 	return hpd_rx_offload_wq;
1359 }
1360 
1361 struct amdgpu_stutter_quirk {
1362 	u16 chip_vendor;
1363 	u16 chip_device;
1364 	u16 subsys_vendor;
1365 	u16 subsys_device;
1366 	u8 revision;
1367 };
1368 
1369 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1370 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1371 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1372 	{ 0, 0, 0, 0, 0 },
1373 };
1374 
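/* Return true if the PCI device matches an entry in the stutter quirk list. */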
1375 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1376 {
1377 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1378 
1379 	while (p && p->chip_device != 0) {
1380 		if (pdev->vendor == p->chip_vendor &&
1381 		    pdev->device == p->chip_device &&
1382 		    pdev->subsystem_vendor == p->subsys_vendor &&
1383 		    pdev->subsystem_device == p->subsys_device &&
1384 		    pdev->revision == p->revision) {
1385 			return true;
1386 		}
1387 		++p;
1388 	}
1389 	return false;
1390 }
1391 
1392 static int amdgpu_dm_init(struct amdgpu_device *adev)
1393 {
1394 	struct dc_init_data init_data;
1395 #ifdef CONFIG_DRM_AMD_DC_HDCP
1396 	struct dc_callback_init init_params;
1397 #endif
1398 	int r;
1399 
1400 	adev->dm.ddev = adev_to_drm(adev);
1401 	adev->dm.adev = adev;
1402 
1403 	/* Zero all the fields */
1404 	memset(&init_data, 0, sizeof(init_data));
1405 #ifdef CONFIG_DRM_AMD_DC_HDCP
1406 	memset(&init_params, 0, sizeof(init_params));
1407 #endif
1408 
1409 	mutex_init(&adev->dm.dc_lock);
1410 	mutex_init(&adev->dm.audio_lock);
1411 #if defined(CONFIG_DRM_AMD_DC_DCN)
1412 	spin_lock_init(&adev->dm.vblank_lock);
1413 #endif
1414 
1415 	if (amdgpu_dm_irq_init(adev)) {
1416 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1417 		goto error;
1418 	}
1419 
1420 	init_data.asic_id.chip_family = adev->family;
1421 
1422 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1423 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1424 	init_data.asic_id.chip_id = adev->pdev->device;
1425 
1426 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1427 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1428 	init_data.asic_id.atombios_base_address =
1429 		adev->mode_info.atom_context->bios;
1430 
1431 	init_data.driver = adev;
1432 
1433 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1434 
1435 	if (!adev->dm.cgs_device) {
1436 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1437 		goto error;
1438 	}
1439 
1440 	init_data.cgs_device = adev->dm.cgs_device;
1441 
1442 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1443 
1444 	switch (adev->asic_type) {
1445 	case CHIP_CARRIZO:
1446 	case CHIP_STONEY:
1447 		init_data.flags.gpu_vm_support = true;
1448 		break;
1449 	default:
1450 		switch (adev->ip_versions[DCE_HWIP][0]) {
1451 		case IP_VERSION(2, 1, 0):
1452 			init_data.flags.gpu_vm_support = true;
1453 			switch (adev->dm.dmcub_fw_version) {
1454 			case 0: /* development */
1455 			case 0x1: /* linux-firmware.git hash 6d9f399 */
1456 			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1457 				init_data.flags.disable_dmcu = false;
1458 				break;
1459 			default:
1460 				init_data.flags.disable_dmcu = true;
1461 			}
1462 			break;
1463 		case IP_VERSION(1, 0, 0):
1464 		case IP_VERSION(1, 0, 1):
1465 		case IP_VERSION(3, 0, 1):
1466 		case IP_VERSION(3, 1, 2):
1467 		case IP_VERSION(3, 1, 3):
1468 			init_data.flags.gpu_vm_support = true;
1469 			break;
1470 		case IP_VERSION(2, 0, 3):
1471 			init_data.flags.disable_dmcu = true;
1472 			break;
1473 		default:
1474 			break;
1475 		}
1476 		break;
1477 	}
1478 
1479 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1480 		init_data.flags.fbc_support = true;
1481 
1482 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1483 		init_data.flags.multi_mon_pp_mclk_switch = true;
1484 
1485 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1486 		init_data.flags.disable_fractional_pwm = true;
1487 
1488 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1489 		init_data.flags.edp_no_power_sequencing = true;
1490 
1491 #ifdef CONFIG_DRM_AMD_DC_DCN
1492 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1493 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1494 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1495 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1496 #endif
1497 
1498 	init_data.flags.seamless_boot_edp_requested = false;
1499 
1500 	if (check_seamless_boot_capability(adev)) {
1501 		init_data.flags.seamless_boot_edp_requested = true;
1502 		init_data.flags.allow_seamless_boot_optimization = true;
1503 		DRM_INFO("Seamless boot condition check passed\n");
1504 	}
1505 
1506 	INIT_LIST_HEAD(&adev->dm.da_list);
1507 	/* Display Core create. */
1508 	adev->dm.dc = dc_create(&init_data);
1509 
1510 	if (adev->dm.dc) {
1511 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1512 	} else {
1513 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1514 		goto error;
1515 	}
1516 
1517 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1518 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1519 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1520 	}
1521 
1522 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1523 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1524 	if (dm_should_disable_stutter(adev->pdev))
1525 		adev->dm.dc->debug.disable_stutter = true;
1526 
1527 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1528 		adev->dm.dc->debug.disable_stutter = true;
1529 
1530 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1531 		adev->dm.dc->debug.disable_dsc = true;
1532 		adev->dm.dc->debug.disable_dsc_edp = true;
1533 	}
1534 
1535 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1536 		adev->dm.dc->debug.disable_clock_gate = true;
1537 
1538 	r = dm_dmub_hw_init(adev);
1539 	if (r) {
1540 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1541 		goto error;
1542 	}
1543 
1544 	dc_hardware_init(adev->dm.dc);
1545 
1546 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1547 	if (!adev->dm.hpd_rx_offload_wq) {
1548 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1549 		goto error;
1550 	}
1551 
1552 #if defined(CONFIG_DRM_AMD_DC_DCN)
1553 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1554 		struct dc_phy_addr_space_config pa_config;
1555 
1556 		mmhub_read_system_context(adev, &pa_config);
1557 
1558 		// Call the DC init_memory func
1559 		dc_setup_system_context(adev->dm.dc, &pa_config);
1560 	}
1561 #endif
1562 
1563 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1564 	if (!adev->dm.freesync_module) {
1565 		DRM_ERROR(
1566 		"amdgpu: failed to initialize freesync_module.\n");
1567 	} else
1568 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1569 				adev->dm.freesync_module);
1570 
1571 	amdgpu_dm_init_color_mod();
1572 
1573 #if defined(CONFIG_DRM_AMD_DC_DCN)
1574 	if (adev->dm.dc->caps.max_links > 0) {
1575 		adev->dm.vblank_control_workqueue =
1576 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1577 		if (!adev->dm.vblank_control_workqueue)
1578 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1579 	}
1580 #endif
1581 
1582 #ifdef CONFIG_DRM_AMD_DC_HDCP
1583 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1584 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1585 
1586 		if (!adev->dm.hdcp_workqueue)
1587 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1588 		else
1589 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1590 
1591 		dc_init_callbacks(adev->dm.dc, &init_params);
1592 	}
1593 #endif
1594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1595 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1596 #endif
1597 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1598 		init_completion(&adev->dm.dmub_aux_transfer_done);
1599 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1600 		if (!adev->dm.dmub_notify) {
1601 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1602 			goto error;
1603 		}
1604 
1605 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1606 		if (!adev->dm.delayed_hpd_wq) {
1607 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1608 			goto error;
1609 		}
1610 
1611 		amdgpu_dm_outbox_init(adev);
1612 #if defined(CONFIG_DRM_AMD_DC_DCN)
1613 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1614 			dmub_aux_setconfig_callback, false)) {
1615 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1616 			goto error;
1617 		}
1618 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1619 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1620 			goto error;
1621 		}
1622 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1623 			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
1624 			goto error;
1625 		}
1626 #endif /* CONFIG_DRM_AMD_DC_DCN */
1627 	}
1628 
1629 	if (amdgpu_dm_initialize_drm_device(adev)) {
1630 		DRM_ERROR(
1631 		"amdgpu: failed to initialize sw for display support.\n");
1632 		goto error;
1633 	}
1634 
1635 	/* create fake encoders for MST */
1636 	dm_dp_create_fake_mst_encoders(adev);
1637 
1638 	/* TODO: Add_display_info? */
1639 
1640 	/* TODO use dynamic cursor width */
1641 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1642 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1643 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}
1649 
1650 
1651 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1652 
1653 	return 0;
1654 error:
1655 	amdgpu_dm_fini(adev);
1656 
1657 	return -EINVAL;
1658 }
1659 
1660 static int amdgpu_dm_early_fini(void *handle)
1661 {
1662 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1663 
1664 	amdgpu_dm_audio_fini(adev);
1665 
1666 	return 0;
1667 }
1668 
1669 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1670 {
1671 	int i;
1672 
1673 #if defined(CONFIG_DRM_AMD_DC_DCN)
1674 	if (adev->dm.vblank_control_workqueue) {
1675 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1676 		adev->dm.vblank_control_workqueue = NULL;
1677 	}
1678 #endif
1679 
1680 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1681 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1682 	}
1683 
1684 	amdgpu_dm_destroy_drm_device(&adev->dm);
1685 
1686 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1687 	if (adev->dm.crc_rd_wrk) {
1688 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1689 		kfree(adev->dm.crc_rd_wrk);
1690 		adev->dm.crc_rd_wrk = NULL;
1691 	}
1692 #endif
1693 #ifdef CONFIG_DRM_AMD_DC_HDCP
1694 	if (adev->dm.hdcp_workqueue) {
1695 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1696 		adev->dm.hdcp_workqueue = NULL;
1697 	}
1698 
1699 	if (adev->dm.dc)
1700 		dc_deinit_callbacks(adev->dm.dc);
1701 #endif
1702 
1703 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1704 
1705 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1706 		kfree(adev->dm.dmub_notify);
1707 		adev->dm.dmub_notify = NULL;
1708 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1709 		adev->dm.delayed_hpd_wq = NULL;
1710 	}
1711 
1712 	if (adev->dm.dmub_bo)
1713 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1714 				      &adev->dm.dmub_bo_gpu_addr,
1715 				      &adev->dm.dmub_bo_cpu_addr);
1716 
1717 	if (adev->dm.hpd_rx_offload_wq) {
1718 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1719 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1720 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1721 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1722 			}
1723 		}
1724 
1725 		kfree(adev->dm.hpd_rx_offload_wq);
1726 		adev->dm.hpd_rx_offload_wq = NULL;
1727 	}
1728 
1729 	/* DC Destroy TODO: Replace destroy DAL */
1730 	if (adev->dm.dc)
1731 		dc_destroy(&adev->dm.dc);
1732 	/*
1733 	 * TODO: pageflip, vlank interrupt
1734 	 *
1735 	 * amdgpu_dm_irq_fini(adev);
1736 	 */
1737 
1738 	if (adev->dm.cgs_device) {
1739 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1740 		adev->dm.cgs_device = NULL;
1741 	}
1742 	if (adev->dm.freesync_module) {
1743 		mod_freesync_destroy(adev->dm.freesync_module);
1744 		adev->dm.freesync_module = NULL;
1745 	}
1746 
1747 	mutex_destroy(&adev->dm.audio_lock);
1748 	mutex_destroy(&adev->dm.dc_lock);
1749 
1750 	return;
1751 }
1752 
1753 static int load_dmcu_fw(struct amdgpu_device *adev)
1754 {
1755 	const char *fw_name_dmcu = NULL;
1756 	int r;
1757 	const struct dmcu_firmware_header_v1_0 *hdr;
1758 
	switch (adev->asic_type) {
1760 #if defined(CONFIG_DRM_AMD_DC_SI)
1761 	case CHIP_TAHITI:
1762 	case CHIP_PITCAIRN:
1763 	case CHIP_VERDE:
1764 	case CHIP_OLAND:
1765 #endif
1766 	case CHIP_BONAIRE:
1767 	case CHIP_HAWAII:
1768 	case CHIP_KAVERI:
1769 	case CHIP_KABINI:
1770 	case CHIP_MULLINS:
1771 	case CHIP_TONGA:
1772 	case CHIP_FIJI:
1773 	case CHIP_CARRIZO:
1774 	case CHIP_STONEY:
1775 	case CHIP_POLARIS11:
1776 	case CHIP_POLARIS10:
1777 	case CHIP_POLARIS12:
1778 	case CHIP_VEGAM:
1779 	case CHIP_VEGA10:
1780 	case CHIP_VEGA12:
1781 	case CHIP_VEGA20:
1782 		return 0;
1783 	case CHIP_NAVI12:
1784 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1785 		break;
1786 	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
1793 		break;
1794 	default:
1795 		switch (adev->ip_versions[DCE_HWIP][0]) {
1796 		case IP_VERSION(2, 0, 2):
1797 		case IP_VERSION(2, 0, 3):
1798 		case IP_VERSION(2, 0, 0):
1799 		case IP_VERSION(2, 1, 0):
1800 		case IP_VERSION(3, 0, 0):
1801 		case IP_VERSION(3, 0, 2):
1802 		case IP_VERSION(3, 0, 3):
1803 		case IP_VERSION(3, 0, 1):
1804 		case IP_VERSION(3, 1, 2):
1805 		case IP_VERSION(3, 1, 3):
1806 		case IP_VERSION(3, 1, 6):
1807 			return 0;
1808 		default:
1809 			break;
1810 		}
1811 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1812 		return -EINVAL;
1813 	}
1814 
1815 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1816 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1817 		return 0;
1818 	}
1819 
1820 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1821 	if (r == -ENOENT) {
1822 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1823 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1824 		adev->dm.fw_dmcu = NULL;
1825 		return 0;
1826 	}
1827 	if (r) {
1828 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1829 			fw_name_dmcu);
1830 		return r;
1831 	}
1832 
1833 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1834 	if (r) {
1835 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1836 			fw_name_dmcu);
1837 		release_firmware(adev->dm.fw_dmcu);
1838 		adev->dm.fw_dmcu = NULL;
1839 		return r;
1840 	}
1841 
1842 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
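	/*
	 * The DMCU image contains both the ERAM payload and an interrupt
	 * vector region; reserve page-aligned space for each so PSP can load
	 * them as separate ucode entries.
	 */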
1843 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1844 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1845 	adev->firmware.fw_size +=
1846 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1847 
1848 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1849 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1850 	adev->firmware.fw_size +=
1851 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1852 
1853 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1854 
1855 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1856 
1857 	return 0;
1858 }
1859 
1860 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1861 {
1862 	struct amdgpu_device *adev = ctx;
1863 
1864 	return dm_read_reg(adev->dm.dc->ctx, address);
1865 }
1866 
1867 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1868 				     uint32_t value)
1869 {
1870 	struct amdgpu_device *adev = ctx;
1871 
1872 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1873 }
1874 
1875 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1876 {
1877 	struct dmub_srv_create_params create_params;
1878 	struct dmub_srv_region_params region_params;
1879 	struct dmub_srv_region_info region_info;
1880 	struct dmub_srv_fb_params fb_params;
1881 	struct dmub_srv_fb_info *fb_info;
1882 	struct dmub_srv *dmub_srv;
1883 	const struct dmcub_firmware_header_v1_0 *hdr;
1884 	const char *fw_name_dmub;
1885 	enum dmub_asic dmub_asic;
1886 	enum dmub_status status;
1887 	int r;
1888 
1889 	switch (adev->ip_versions[DCE_HWIP][0]) {
1890 	case IP_VERSION(2, 1, 0):
1891 		dmub_asic = DMUB_ASIC_DCN21;
1892 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1893 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1894 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1895 		break;
1896 	case IP_VERSION(3, 0, 0):
1897 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1898 			dmub_asic = DMUB_ASIC_DCN30;
1899 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1900 		} else {
1901 			dmub_asic = DMUB_ASIC_DCN30;
1902 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1903 		}
1904 		break;
1905 	case IP_VERSION(3, 0, 1):
1906 		dmub_asic = DMUB_ASIC_DCN301;
1907 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1908 		break;
1909 	case IP_VERSION(3, 0, 2):
1910 		dmub_asic = DMUB_ASIC_DCN302;
1911 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1912 		break;
1913 	case IP_VERSION(3, 0, 3):
1914 		dmub_asic = DMUB_ASIC_DCN303;
1915 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1916 		break;
1917 	case IP_VERSION(3, 1, 2):
1918 	case IP_VERSION(3, 1, 3):
1919 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1920 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1921 		break;
1922 	case IP_VERSION(3, 1, 6):
1923 		dmub_asic = DMUB_ASIC_DCN316;
1924 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1925 		break;
1926 
1927 	default:
1928 		/* ASIC doesn't support DMUB. */
1929 		return 0;
1930 	}
1931 
1932 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1933 	if (r) {
1934 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1935 		return 0;
1936 	}
1937 
1938 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1939 	if (r) {
1940 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1941 		return 0;
1942 	}
1943 
1944 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1945 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1946 
1947 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1948 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1949 			AMDGPU_UCODE_ID_DMCUB;
1950 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1951 			adev->dm.dmub_fw;
1952 		adev->firmware.fw_size +=
1953 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1954 
1955 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1956 			 adev->dm.dmcub_fw_version);
1957 	}
1958 
1959 
1960 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1961 	dmub_srv = adev->dm.dmub_srv;
1962 
1963 	if (!dmub_srv) {
1964 		DRM_ERROR("Failed to allocate DMUB service!\n");
1965 		return -ENOMEM;
1966 	}
1967 
1968 	memset(&create_params, 0, sizeof(create_params));
1969 	create_params.user_ctx = adev;
1970 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1971 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1972 	create_params.asic = dmub_asic;
1973 
1974 	/* Create the DMUB service. */
1975 	status = dmub_srv_create(dmub_srv, &create_params);
1976 	if (status != DMUB_STATUS_OK) {
1977 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1978 		return -EINVAL;
1979 	}
1980 
1981 	/* Calculate the size of all the regions for the DMUB service. */
1982 	memset(&region_params, 0, sizeof(region_params));
1983 
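	/*
	 * The instruction-constant blob in the firmware image is wrapped by
	 * PSP header/footer bytes; trim them from the reported size and skip
	 * the header when pointing at the payload.
	 */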
1984 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1985 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1986 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1987 	region_params.vbios_size = adev->bios_size;
1988 	region_params.fw_bss_data = region_params.bss_data_size ?
1989 		adev->dm.dmub_fw->data +
1990 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1991 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1992 	region_params.fw_inst_const =
1993 		adev->dm.dmub_fw->data +
1994 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1995 		PSP_HEADER_BYTES;
1996 
1997 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1998 					   &region_info);
1999 
2000 	if (status != DMUB_STATUS_OK) {
2001 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2002 		return -EINVAL;
2003 	}
2004 
2005 	/*
2006 	 * Allocate a framebuffer based on the total size of all the regions.
2007 	 * TODO: Move this into GART.
2008 	 */
2009 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2010 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2011 				    &adev->dm.dmub_bo_gpu_addr,
2012 				    &adev->dm.dmub_bo_cpu_addr);
2013 	if (r)
2014 		return r;
2015 
2016 	/* Rebase the regions on the framebuffer address. */
2017 	memset(&fb_params, 0, sizeof(fb_params));
2018 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2019 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2020 	fb_params.region_info = &region_info;
2021 
2022 	adev->dm.dmub_fb_info =
2023 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2024 	fb_info = adev->dm.dmub_fb_info;
2025 
2026 	if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
2029 		return -ENOMEM;
2030 	}
2031 
2032 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2033 	if (status != DMUB_STATUS_OK) {
2034 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2035 		return -EINVAL;
2036 	}
2037 
2038 	return 0;
2039 }
2040 
2041 static int dm_sw_init(void *handle)
2042 {
2043 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2044 	int r;
2045 
2046 	r = dm_dmub_sw_init(adev);
2047 	if (r)
2048 		return r;
2049 
2050 	return load_dmcu_fw(adev);
2051 }
2052 
2053 static int dm_sw_fini(void *handle)
2054 {
2055 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2056 
2057 	kfree(adev->dm.dmub_fb_info);
2058 	adev->dm.dmub_fb_info = NULL;
2059 
2060 	if (adev->dm.dmub_srv) {
2061 		dmub_srv_destroy(adev->dm.dmub_srv);
2062 		adev->dm.dmub_srv = NULL;
2063 	}
2064 
2065 	release_firmware(adev->dm.dmub_fw);
2066 	adev->dm.dmub_fw = NULL;
2067 
2068 	release_firmware(adev->dm.fw_dmcu);
2069 	adev->dm.fw_dmcu = NULL;
2070 
2071 	return 0;
2072 }
2073 
2074 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2075 {
2076 	struct amdgpu_dm_connector *aconnector;
2077 	struct drm_connector *connector;
2078 	struct drm_connector_list_iter iter;
2079 	int ret = 0;
2080 
2081 	drm_connector_list_iter_begin(dev, &iter);
2082 	drm_for_each_connector_iter(connector, &iter) {
2083 		aconnector = to_amdgpu_dm_connector(connector);
2084 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2085 		    aconnector->mst_mgr.aux) {
2086 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2087 					 aconnector,
2088 					 aconnector->base.base.id);
2089 
2090 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2091 			if (ret < 0) {
2092 				DRM_ERROR("DM_MST: Failed to start MST\n");
2093 				aconnector->dc_link->type =
2094 					dc_connection_single;
2095 				break;
2096 			}
2097 		}
2098 	}
2099 	drm_connector_list_iter_end(&iter);
2100 
2101 	return ret;
2102 }
2103 
2104 static int dm_late_init(void *handle)
2105 {
2106 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2107 
2108 	struct dmcu_iram_parameters params;
2109 	unsigned int linear_lut[16];
2110 	int i;
2111 	struct dmcu *dmcu = NULL;
2112 
2113 	dmcu = adev->dm.dc->res_pool->dmcu;
2114 
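	/* Build a 16-entry linear backlight LUT spanning 0 to 0xFFFF. */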
2115 	for (i = 0; i < 16; i++)
2116 		linear_lut[i] = 0xFFFF * i / 15;
2117 
2118 	params.set = 0;
2119 	params.backlight_ramping_override = false;
2120 	params.backlight_ramping_start = 0xCCCC;
2121 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2122 	params.backlight_lut_array_size = 16;
2123 	params.backlight_lut_array = linear_lut;
2124 
	/* Min backlight level after ABM reduction. Don't allow it to go below
	 * 1%: 0xFFFF * 0.01 = 0x28F.
	 */
2128 	params.min_abm_backlight = 0x28F;
	/* When ABM is implemented on DMCUB (ABM 2.4 and up), the dmcu object
	 * will be NULL.
	 */
2133 	if (dmcu) {
2134 		if (!dmcu_load_iram(dmcu, params))
2135 			return -EINVAL;
2136 	} else if (adev->dm.dc->ctx->dmub_srv) {
2137 		struct dc_link *edp_links[MAX_NUM_EDP];
2138 		int edp_num;
2139 
2140 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2141 		for (i = 0; i < edp_num; i++) {
2142 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2143 				return -EINVAL;
2144 		}
2145 	}
2146 
2147 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2148 }
2149 
2150 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2151 {
2152 	struct amdgpu_dm_connector *aconnector;
2153 	struct drm_connector *connector;
2154 	struct drm_connector_list_iter iter;
2155 	struct drm_dp_mst_topology_mgr *mgr;
2156 	int ret;
2157 	bool need_hotplug = false;
2158 
2159 	drm_connector_list_iter_begin(dev, &iter);
2160 	drm_for_each_connector_iter(connector, &iter) {
2161 		aconnector = to_amdgpu_dm_connector(connector);
2162 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2163 		    aconnector->mst_port)
2164 			continue;
2165 
2166 		mgr = &aconnector->mst_mgr;
2167 
2168 		if (suspend) {
2169 			drm_dp_mst_topology_mgr_suspend(mgr);
2170 		} else {
2171 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2172 			if (ret < 0) {
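				/*
				 * Resume failed: drop the MST topology and
				 * flag a hotplug so userspace reprobes the
				 * link.
				 */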
2173 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2174 				need_hotplug = true;
2175 			}
2176 		}
2177 	}
2178 	drm_connector_list_iter_end(&iter);
2179 
2180 	if (need_hotplug)
2181 		drm_kms_helper_hotplug_event(dev);
2182 }
2183 
2184 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2185 {
2186 	int ret = 0;
2187 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
	 * should be passed to smu during boot up and on resume from S3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create / dcn20_resource_construct, then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux:
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
	 */
2218 	switch (adev->ip_versions[DCE_HWIP][0]) {
2219 	case IP_VERSION(2, 0, 2):
2220 	case IP_VERSION(2, 0, 0):
2221 		break;
2222 	default:
2223 		return 0;
2224 	}
2225 
2226 	ret = amdgpu_dpm_write_watermarks_table(adev);
2227 	if (ret) {
2228 		DRM_ERROR("Failed to update WMTABLE!\n");
2229 		return ret;
2230 	}
2231 
2232 	return 0;
2233 }
2234 
2235 /**
2236  * dm_hw_init() - Initialize DC device
2237  * @handle: The base driver device containing the amdgpu_dm device.
2238  *
2239  * Initialize the &struct amdgpu_display_manager device. This involves calling
2240  * the initializers of each DM component, then populating the struct with them.
2241  *
2242  * Although the function implies hardware initialization, both hardware and
2243  * software are initialized here. Splitting them out to their relevant init
2244  * hooks is a future TODO item.
2245  *
2246  * Some notable things that are initialized here:
2247  *
2248  * - Display Core, both software and hardware
2249  * - DC modules that we need (freesync and color management)
2250  * - DRM software states
2251  * - Interrupt sources and handlers
2252  * - Vblank support
2253  * - Debug FS entries, if enabled
2254  */
2255 static int dm_hw_init(void *handle)
2256 {
2257 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2258 	/* Create DAL display manager */
2259 	amdgpu_dm_init(adev);
2260 	amdgpu_dm_hpd_init(adev);
2261 
2262 	return 0;
2263 }
2264 
2265 /**
2266  * dm_hw_fini() - Teardown DC device
2267  * @handle: The base driver device containing the amdgpu_dm device.
2268  *
2269  * Teardown components within &struct amdgpu_display_manager that require
2270  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2271  * were loaded. Also flush IRQ workqueues and disable them.
2272  */
2273 static int dm_hw_fini(void *handle)
2274 {
2275 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2276 
2277 	amdgpu_dm_hpd_fini(adev);
2278 
2279 	amdgpu_dm_irq_fini(adev);
2280 	amdgpu_dm_fini(adev);
2281 	return 0;
2282 }
2283 
2284 
2285 static int dm_enable_vblank(struct drm_crtc *crtc);
2286 static void dm_disable_vblank(struct drm_crtc *crtc);
2287 
2288 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2289 				 struct dc_state *state, bool enable)
2290 {
2291 	enum dc_irq_source irq_source;
2292 	struct amdgpu_crtc *acrtc;
2293 	int rc = -EBUSY;
2294 	int i = 0;
2295 
2296 	for (i = 0; i < state->stream_count; i++) {
2297 		acrtc = get_crtc_by_otg_inst(
2298 				adev, state->stream_status[i].primary_otg_inst);
2299 
2300 		if (acrtc && state->stream_status[i].plane_count != 0) {
2301 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2302 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2304 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2305 			if (rc)
2306 				DRM_WARN("Failed to %s pflip interrupts\n",
2307 					 enable ? "enable" : "disable");
2308 
2309 			if (enable) {
2310 				rc = dm_enable_vblank(&acrtc->base);
2311 				if (rc)
2312 					DRM_WARN("Failed to enable vblank interrupts\n");
2313 			} else {
2314 				dm_disable_vblank(&acrtc->base);
2315 			}
2316 
2317 		}
2318 	}
2319 
2320 }
2321 
2322 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2323 {
2324 	struct dc_state *context = NULL;
2325 	enum dc_status res = DC_ERROR_UNEXPECTED;
2326 	int i;
2327 	struct dc_stream_state *del_streams[MAX_PIPES];
2328 	int del_streams_count = 0;
2329 
2330 	memset(del_streams, 0, sizeof(del_streams));
2331 
2332 	context = dc_create_state(dc);
2333 	if (context == NULL)
2334 		goto context_alloc_fail;
2335 
2336 	dc_resource_state_copy_construct_current(dc, context);
2337 
2338 	/* First remove from context all streams */
2339 	for (i = 0; i < context->stream_count; i++) {
2340 		struct dc_stream_state *stream = context->streams[i];
2341 
2342 		del_streams[del_streams_count++] = stream;
2343 	}
2344 
2345 	/* Remove all planes for removed streams and then remove the streams */
2346 	for (i = 0; i < del_streams_count; i++) {
2347 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2348 			res = DC_FAIL_DETACH_SURFACES;
2349 			goto fail;
2350 		}
2351 
2352 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2353 		if (res != DC_OK)
2354 			goto fail;
2355 	}
2356 
2357 	res = dc_commit_state(dc, context);
2358 
2359 fail:
2360 	dc_release_state(context);
2361 
2362 context_alloc_fail:
2363 	return res;
2364 }
2365 
2366 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2367 {
2368 	int i;
2369 
2370 	if (dm->hpd_rx_offload_wq) {
2371 		for (i = 0; i < dm->dc->caps.max_links; i++)
2372 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2373 	}
2374 }
2375 
2376 static int dm_suspend(void *handle)
2377 {
2378 	struct amdgpu_device *adev = handle;
2379 	struct amdgpu_display_manager *dm = &adev->dm;
2380 	int ret = 0;
2381 
2382 	if (amdgpu_in_reset(adev)) {
2383 		mutex_lock(&dm->dc_lock);
2384 
2385 #if defined(CONFIG_DRM_AMD_DC_DCN)
2386 		dc_allow_idle_optimizations(adev->dm.dc, false);
2387 #endif
2388 
2389 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2390 
2391 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2392 
2393 		amdgpu_dm_commit_zero_streams(dm->dc);
2394 
2395 		amdgpu_dm_irq_suspend(adev);
2396 
2397 		hpd_rx_irq_work_suspend(dm);
2398 
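		/*
		 * Note: dc_lock is intentionally left held across the GPU
		 * reset; dm_resume() releases it once the cached DC state has
		 * been re-applied.
		 */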
2399 		return ret;
2400 	}
2401 
2402 	WARN_ON(adev->dm.cached_state);
2403 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2404 
2405 	s3_handle_mst(adev_to_drm(adev), true);
2406 
2407 	amdgpu_dm_irq_suspend(adev);
2408 
2409 	hpd_rx_irq_work_suspend(dm);
2410 
2411 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2412 
2413 	return 0;
2414 }
2415 
2416 struct amdgpu_dm_connector *
2417 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2418 					     struct drm_crtc *crtc)
2419 {
2420 	uint32_t i;
2421 	struct drm_connector_state *new_con_state;
2422 	struct drm_connector *connector;
2423 	struct drm_crtc *crtc_from_state;
2424 
2425 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2426 		crtc_from_state = new_con_state->crtc;
2427 
2428 		if (crtc_from_state == crtc)
2429 			return to_amdgpu_dm_connector(connector);
2430 	}
2431 
2432 	return NULL;
2433 }
2434 
2435 static void emulated_link_detect(struct dc_link *link)
2436 {
2437 	struct dc_sink_init_data sink_init_data = { 0 };
2438 	struct display_sink_capability sink_caps = { 0 };
2439 	enum dc_edid_status edid_status;
2440 	struct dc_context *dc_ctx = link->ctx;
2441 	struct dc_sink *sink = NULL;
2442 	struct dc_sink *prev_sink = NULL;
2443 
2444 	link->type = dc_connection_none;
2445 	prev_sink = link->local_sink;
2446 
2447 	if (prev_sink)
2448 		dc_sink_release(prev_sink);
2449 
2450 	switch (link->connector_signal) {
2451 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2452 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2453 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2454 		break;
2455 	}
2456 
2457 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2458 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2459 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2460 		break;
2461 	}
2462 
2463 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2464 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2465 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2466 		break;
2467 	}
2468 
2469 	case SIGNAL_TYPE_LVDS: {
2470 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2471 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2472 		break;
2473 	}
2474 
2475 	case SIGNAL_TYPE_EDP: {
2476 		sink_caps.transaction_type =
2477 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2478 		sink_caps.signal = SIGNAL_TYPE_EDP;
2479 		break;
2480 	}
2481 
2482 	case SIGNAL_TYPE_DISPLAY_PORT: {
2483 		sink_caps.transaction_type =
2484 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2485 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2486 		break;
2487 	}
2488 
2489 	default:
2490 		DC_ERROR("Invalid connector type! signal:%d\n",
2491 			link->connector_signal);
2492 		return;
2493 	}
2494 
2495 	sink_init_data.link = link;
2496 	sink_init_data.sink_signal = sink_caps.signal;
2497 
2498 	sink = dc_sink_create(&sink_init_data);
2499 	if (!sink) {
2500 		DC_ERROR("Failed to create sink!\n");
2501 		return;
2502 	}
2503 
2504 	/* dc_sink_create returns a new reference */
2505 	link->local_sink = sink;
2506 
2507 	edid_status = dm_helpers_read_local_edid(
2508 			link->ctx,
2509 			link,
2510 			sink);
2511 
2512 	if (edid_status != EDID_OK)
2513 		DC_ERROR("Failed to read EDID");
2514 
2515 }
2516 
2517 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2518 				     struct amdgpu_display_manager *dm)
2519 {
2520 	struct {
2521 		struct dc_surface_update surface_updates[MAX_SURFACES];
2522 		struct dc_plane_info plane_infos[MAX_SURFACES];
2523 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2524 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2525 		struct dc_stream_update stream_update;
	} *bundle;
2527 	int k, m;
2528 
2529 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2530 
2531 	if (!bundle) {
2532 		dm_error("Failed to allocate update bundle\n");
2533 		goto cleanup;
2534 	}
2535 
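	/*
	 * Force a full update on every surface so DC reprograms all planes
	 * when the cached state is committed again after the reset.
	 */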
2536 	for (k = 0; k < dc_state->stream_count; k++) {
2537 		bundle->stream_update.stream = dc_state->streams[k];
2538 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
2548 			dc_state->streams[k], &bundle->stream_update, dc_state);
2549 	}
2550 
2551 cleanup:
2552 	kfree(bundle);
2553 
2554 	return;
2555 }
2556 
2557 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2558 {
2559 	struct dc_stream_state *stream_state;
2560 	struct amdgpu_dm_connector *aconnector = link->priv;
2561 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2562 	struct dc_stream_update stream_update;
2563 	bool dpms_off = true;
2564 
2565 	memset(&stream_update, 0, sizeof(stream_update));
2566 	stream_update.dpms_off = &dpms_off;
2567 
2568 	mutex_lock(&adev->dm.dc_lock);
2569 	stream_state = dc_stream_find_from_link(link);
2570 
2571 	if (stream_state == NULL) {
2572 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2573 		mutex_unlock(&adev->dm.dc_lock);
2574 		return;
2575 	}
2576 
2577 	stream_update.stream = stream_state;
2578 	acrtc_state->force_dpms_off = true;
2579 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2580 				     stream_state, &stream_update,
2581 				     stream_state->ctx->dc->current_state);
2582 	mutex_unlock(&adev->dm.dc_lock);
2583 }
2584 
2585 static int dm_resume(void *handle)
2586 {
2587 	struct amdgpu_device *adev = handle;
2588 	struct drm_device *ddev = adev_to_drm(adev);
2589 	struct amdgpu_display_manager *dm = &adev->dm;
2590 	struct amdgpu_dm_connector *aconnector;
2591 	struct drm_connector *connector;
2592 	struct drm_connector_list_iter iter;
2593 	struct drm_crtc *crtc;
2594 	struct drm_crtc_state *new_crtc_state;
2595 	struct dm_crtc_state *dm_new_crtc_state;
2596 	struct drm_plane *plane;
2597 	struct drm_plane_state *new_plane_state;
2598 	struct dm_plane_state *dm_new_plane_state;
2599 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2600 	enum dc_connection_type new_connection_type = dc_connection_none;
2601 	struct dc_state *dc_state;
2602 	int i, r, j;
2603 
2604 	if (amdgpu_in_reset(adev)) {
2605 		dc_state = dm->cached_dc_state;
2606 
2607 		/*
2608 		 * The dc->current_state is backed up into dm->cached_dc_state
2609 		 * before we commit 0 streams.
2610 		 *
2611 		 * DC will clear link encoder assignments on the real state
2612 		 * but the changes won't propagate over to the copy we made
2613 		 * before the 0 streams commit.
2614 		 *
2615 		 * DC expects that link encoder assignments are *not* valid
2616 		 * when committing a state, so as a workaround it needs to be
2617 		 * cleared here.
2618 		 */
2619 		link_enc_cfg_init(dm->dc, dc_state);
2620 
2621 		if (dc_enable_dmub_notifications(adev->dm.dc))
2622 			amdgpu_dm_outbox_init(adev);
2623 
2624 		r = dm_dmub_hw_init(adev);
2625 		if (r)
2626 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2627 
2628 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2629 		dc_resume(dm->dc);
2630 
2631 		amdgpu_dm_irq_resume_early(adev);
2632 
2633 		for (i = 0; i < dc_state->stream_count; i++) {
2634 			dc_state->streams[i]->mode_changed = true;
2635 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2636 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2637 					= 0xffffffff;
2638 			}
2639 		}
2640 
2641 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2642 
2643 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2644 
2645 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2646 
2647 		dc_release_state(dm->cached_dc_state);
2648 		dm->cached_dc_state = NULL;
2649 
2650 		amdgpu_dm_irq_resume_late(adev);
2651 
2652 		mutex_unlock(&dm->dc_lock);
2653 
2654 		return 0;
2655 	}
2656 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2657 	dc_release_state(dm_state->context);
2658 	dm_state->context = dc_create_state(dm->dc);
2659 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2660 	dc_resource_state_construct(dm->dc, dm_state->context);
2661 
2662 	/* Re-enable outbox interrupts for DPIA. */
2663 	if (dc_enable_dmub_notifications(adev->dm.dc))
2664 		amdgpu_dm_outbox_init(adev);
2665 
2666 	/* Before powering on DC we need to re-initialize DMUB. */
2667 	dm_dmub_hw_resume(adev);
2668 
2669 	/* power on hardware */
2670 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2671 
2672 	/* program HPD filter */
2673 	dc_resume(dm->dc);
2674 
2675 	/*
2676 	 * early enable HPD Rx IRQ, should be done before set mode as short
2677 	 * pulse interrupts are used for MST
2678 	 */
2679 	amdgpu_dm_irq_resume_early(adev);
2680 
2681 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2682 	s3_handle_mst(ddev, false);
2683 
2684 	/* Do detection*/
2685 	drm_connector_list_iter_begin(ddev, &iter);
2686 	drm_for_each_connector_iter(connector, &iter) {
2687 		aconnector = to_amdgpu_dm_connector(connector);
2688 
2689 		/*
2690 		 * this is the case when traversing through already created
2691 		 * MST connectors, should be skipped
2692 		 */
2693 		if (aconnector->mst_port)
2694 			continue;
2695 
2696 		mutex_lock(&aconnector->hpd_lock);
2697 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2698 			DRM_ERROR("KMS: Failed to detect connector\n");
2699 
2700 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2701 			emulated_link_detect(aconnector->dc_link);
2702 		else
2703 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2704 
2705 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2706 			aconnector->fake_enable = false;
2707 
2708 		if (aconnector->dc_sink)
2709 			dc_sink_release(aconnector->dc_sink);
2710 		aconnector->dc_sink = NULL;
2711 		amdgpu_dm_update_connector_after_detect(aconnector);
2712 		mutex_unlock(&aconnector->hpd_lock);
2713 	}
2714 	drm_connector_list_iter_end(&iter);
2715 
2716 	/* Force mode set in atomic commit */
2717 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2718 		new_crtc_state->active_changed = true;
2719 
2720 	/*
2721 	 * atomic_check is expected to create the dc states. We need to release
2722 	 * them here, since they were duplicated as part of the suspend
2723 	 * procedure.
2724 	 */
2725 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2726 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2727 		if (dm_new_crtc_state->stream) {
2728 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2729 			dc_stream_release(dm_new_crtc_state->stream);
2730 			dm_new_crtc_state->stream = NULL;
2731 		}
2732 	}
2733 
2734 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2735 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2736 		if (dm_new_plane_state->dc_state) {
2737 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2738 			dc_plane_state_release(dm_new_plane_state->dc_state);
2739 			dm_new_plane_state->dc_state = NULL;
2740 		}
2741 	}
2742 
2743 	drm_atomic_helper_resume(ddev, dm->cached_state);
2744 
2745 	dm->cached_state = NULL;
2746 
2747 	amdgpu_dm_irq_resume_late(adev);
2748 
2749 	amdgpu_dm_smu_write_watermarks_table(adev);
2750 
2751 	return 0;
2752 }
2753 
2754 /**
2755  * DOC: DM Lifecycle
2756  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2758  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2759  * the base driver's device list to be initialized and torn down accordingly.
2760  *
2761  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2762  */
2763 
2764 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2765 	.name = "dm",
2766 	.early_init = dm_early_init,
2767 	.late_init = dm_late_init,
2768 	.sw_init = dm_sw_init,
2769 	.sw_fini = dm_sw_fini,
2770 	.early_fini = amdgpu_dm_early_fini,
2771 	.hw_init = dm_hw_init,
2772 	.hw_fini = dm_hw_fini,
2773 	.suspend = dm_suspend,
2774 	.resume = dm_resume,
2775 	.is_idle = dm_is_idle,
2776 	.wait_for_idle = dm_wait_for_idle,
2777 	.check_soft_reset = dm_check_soft_reset,
2778 	.soft_reset = dm_soft_reset,
2779 	.set_clockgating_state = dm_set_clockgating_state,
2780 	.set_powergating_state = dm_set_powergating_state,
2781 };
2782 
2783 const struct amdgpu_ip_block_version dm_ip_block =
2784 {
2785 	.type = AMD_IP_BLOCK_TYPE_DCE,
2786 	.major = 1,
2787 	.minor = 0,
2788 	.rev = 0,
2789 	.funcs = &amdgpu_dm_funcs,
2790 };
2791 
2792 
2793 /**
2794  * DOC: atomic
2795  *
2796  * *WIP*
2797  */
2798 
2799 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2800 	.fb_create = amdgpu_display_user_framebuffer_create,
2801 	.get_format_info = amd_get_format_info,
2802 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2803 	.atomic_check = amdgpu_dm_atomic_check,
2804 	.atomic_commit = drm_atomic_helper_commit,
2805 };
2806 
2807 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2808 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2809 };
2810 
2811 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2812 {
2813 	u32 max_cll, min_cll, max, min, q, r;
2814 	struct amdgpu_dm_backlight_caps *caps;
2815 	struct amdgpu_display_manager *dm;
2816 	struct drm_connector *conn_base;
2817 	struct amdgpu_device *adev;
2818 	struct dc_link *link = NULL;
2819 	static const u8 pre_computed_values[] = {
2820 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2821 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2822 	int i;
2823 
2824 	if (!aconnector || !aconnector->dc_link)
2825 		return;
2826 
2827 	link = aconnector->dc_link;
2828 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2829 		return;
2830 
2831 	conn_base = &aconnector->base;
2832 	adev = drm_to_adev(conn_base->dev);
2833 	dm = &adev->dm;
2834 	for (i = 0; i < dm->num_of_edps; i++) {
2835 		if (link == dm->backlight_link[i])
2836 			break;
2837 	}
2838 	if (i >= dm->num_of_edps)
2839 		return;
2840 	caps = &dm->backlight_caps[i];
2841 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2842 	caps->aux_support = false;
2843 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2844 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2845 
2846 	if (caps->ext_caps->bits.oled == 1 /*||
2847 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2848 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2849 		caps->aux_support = true;
2850 
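	/*
	 * The amdgpu_backlight module parameter overrides the detection
	 * above: 0 disables AUX backlight control, 1 forces it on.
	 */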
2851 	if (amdgpu_backlight == 0)
2852 		caps->aux_support = false;
2853 	else if (amdgpu_backlight == 1)
2854 		caps->aux_support = true;
2855 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would need floating-point
	 * precision; to avoid that, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, CV can be
	 * written as CV = 32*q + r. Substituting into the luminance
	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
	 * pre-compute 50*2**(r/32) for each r. The table was generated with
	 * the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results can be verified against pre_computed_values.
	 */
2871 	q = max_cll >> 5;
2872 	r = max_cll % 32;
2873 	max = (1 << q) * pre_computed_values[r];
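	/*
	 * Example: max_cll = 100 gives q = 3, r = 4, so
	 * max = (1 << 3) * 55 = 440, close to the exact 50*2**(100/32) ~= 436.
	 */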
2874 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
2876 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2877 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2878 
2879 	caps->aux_max_input_signal = max;
2880 	caps->aux_min_input_signal = min;
2881 }
2882 
2883 void amdgpu_dm_update_connector_after_detect(
2884 		struct amdgpu_dm_connector *aconnector)
2885 {
2886 	struct drm_connector *connector = &aconnector->base;
2887 	struct drm_device *dev = connector->dev;
2888 	struct dc_sink *sink;
2889 
2890 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2892 		return;
2893 
2894 	sink = aconnector->dc_link->local_sink;
2895 	if (sink)
2896 		dc_sink_retain(sink);
2897 
	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook; after that, the connector sink is set to either a
	 * fake or a physical sink, depending on the link status.
	 * Skip if this was already done during boot.
	 */
2903 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2904 			&& aconnector->dc_em_sink) {
2905 
		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because connector->sink is set to NULL on
		 * resume.
		 */
2910 		mutex_lock(&dev->mode_config.mutex);
2911 
2912 		if (sink) {
2913 			if (aconnector->dc_sink) {
2914 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump the sink's
				 * refcount, because the link no longer points
				 * to it after a disconnect; otherwise the next
				 * crtc-to-connector reshuffle by the UMD would
				 * trigger an unwanted dc_sink release.
				 */
2921 				dc_sink_release(aconnector->dc_sink);
2922 			}
2923 			aconnector->dc_sink = sink;
2924 			dc_sink_retain(aconnector->dc_sink);
2925 			amdgpu_dm_update_freesync_caps(connector,
2926 					aconnector->edid);
2927 		} else {
2928 			amdgpu_dm_update_freesync_caps(connector, NULL);
2929 			if (!aconnector->dc_sink) {
2930 				aconnector->dc_sink = aconnector->dc_em_sink;
2931 				dc_sink_retain(aconnector->dc_sink);
2932 			}
2933 		}
2934 
2935 		mutex_unlock(&dev->mode_config.mutex);
2936 
2937 		if (sink)
2938 			dc_sink_release(sink);
2939 		return;
2940 	}
2941 
2942 	/*
2943 	 * TODO: temporary guard to look for proper fix
2944 	 * if this sink is MST sink, we should not do anything
2945 	 */
2946 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2947 		dc_sink_release(sink);
2948 		return;
2949 	}
2950 
2951 	if (aconnector->dc_sink == sink) {
2952 		/*
2953 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2954 		 * Do nothing!!
2955 		 */
2956 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2957 				aconnector->connector_id);
2958 		if (sink)
2959 			dc_sink_release(sink);
2960 		return;
2961 	}
2962 
2963 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2964 		aconnector->connector_id, aconnector->dc_sink, sink);
2965 
2966 	mutex_lock(&dev->mode_config.mutex);
2967 
2968 	/*
2969 	 * 1. Update status of the drm connector
2970 	 * 2. Send an event and let userspace tell us what to do
2971 	 */
2972 	if (sink) {
2973 		/*
2974 		 * TODO: check if we still need the S3 mode update workaround.
2975 		 * If yes, put it here.
2976 		 */
2977 		if (aconnector->dc_sink) {
2978 			amdgpu_dm_update_freesync_caps(connector, NULL);
2979 			dc_sink_release(aconnector->dc_sink);
2980 		}
2981 
2982 		aconnector->dc_sink = sink;
2983 		dc_sink_retain(aconnector->dc_sink);
2984 		if (sink->dc_edid.length == 0) {
2985 			aconnector->edid = NULL;
2986 			if (aconnector->dc_link->aux_mode) {
2987 				drm_dp_cec_unset_edid(
2988 					&aconnector->dm_dp_aux.aux);
2989 			}
2990 		} else {
2991 			aconnector->edid =
2992 				(struct edid *)sink->dc_edid.raw_edid;
2993 
2994 			if (aconnector->dc_link->aux_mode)
2995 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2996 						    aconnector->edid);
2997 		}
2998 
2999 		drm_connector_update_edid_property(connector, aconnector->edid);
3000 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3001 		update_connector_ext_caps(aconnector);
3002 	} else {
3003 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3004 		amdgpu_dm_update_freesync_caps(connector, NULL);
3005 		drm_connector_update_edid_property(connector, NULL);
3006 		aconnector->num_modes = 0;
3007 		dc_sink_release(aconnector->dc_sink);
3008 		aconnector->dc_sink = NULL;
3009 		aconnector->edid = NULL;
3010 #ifdef CONFIG_DRM_AMD_DC_HDCP
3011 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3012 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3013 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3014 #endif
3015 	}
3016 
3017 	mutex_unlock(&dev->mode_config.mutex);
3018 
3019 	update_subconnector_property(aconnector);
3020 
3021 	if (sink)
3022 		dc_sink_release(sink);
3023 }
3024 
3025 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3026 {
3027 	struct drm_connector *connector = &aconnector->base;
3028 	struct drm_device *dev = connector->dev;
3029 	enum dc_connection_type new_connection_type = dc_connection_none;
3030 	struct amdgpu_device *adev = drm_to_adev(dev);
3031 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3032 	struct dm_crtc_state *dm_crtc_state = NULL;
3033 
3034 	if (adev->dm.disable_hpd_irq)
3035 		return;
3036 
3037 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3038 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3039 					dm_con_state->base.state,
3040 					dm_con_state->base.crtc));
	/*
	 * On failure, or for MST connectors, there is no need to update the
	 * connector status or notify the OS, since MST handles this in its
	 * own context.
	 */
3045 	mutex_lock(&aconnector->hpd_lock);
3046 
3047 #ifdef CONFIG_DRM_AMD_DC_HDCP
3048 	if (adev->dm.hdcp_workqueue) {
3049 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3050 		dm_con_state->update_hdcp = true;
3051 	}
3052 #endif
3053 	if (aconnector->fake_enable)
3054 		aconnector->fake_enable = false;
3055 
3056 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3057 		DRM_ERROR("KMS: Failed to detect connector\n");
3058 
3059 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3060 		emulated_link_detect(aconnector->dc_link);
3061 
3062 		drm_modeset_lock_all(dev);
3063 		dm_restore_drm_connector_state(dev, connector);
3064 		drm_modeset_unlock_all(dev);
3065 
3066 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3067 			drm_kms_helper_connector_hotplug_event(connector);
3068 
3069 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3070 		if (new_connection_type == dc_connection_none &&
3071 		    aconnector->dc_link->type == dc_connection_none &&
3072 		    dm_crtc_state)
3073 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3074 
3075 		amdgpu_dm_update_connector_after_detect(aconnector);
3076 
3077 		drm_modeset_lock_all(dev);
3078 		dm_restore_drm_connector_state(dev, connector);
3079 		drm_modeset_unlock_all(dev);
3080 
3081 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3082 			drm_kms_helper_connector_hotplug_event(connector);
3083 	}
3084 	mutex_unlock(&aconnector->hpd_lock);
3085 
3086 }
3087 
3088 static void handle_hpd_irq(void *param)
3089 {
3090 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3091 
3092 	handle_hpd_irq_helper(aconnector);
3093 
3094 }
3095 
3096 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3097 {
3098 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3099 	uint8_t dret;
3100 	bool new_irq_handled = false;
3101 	int dpcd_addr;
3102 	int dpcd_bytes_to_read;
3103 
3104 	const int max_process_count = 30;
3105 	int process_count = 0;
3106 
3107 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3108 
3109 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3110 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3111 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3112 		dpcd_addr = DP_SINK_COUNT;
3113 	} else {
3114 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3115 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3116 		dpcd_addr = DP_SINK_COUNT_ESI;
3117 	}
3118 
3119 	dret = drm_dp_dpcd_read(
3120 		&aconnector->dm_dp_aux.aux,
3121 		dpcd_addr,
3122 		esi,
3123 		dpcd_bytes_to_read);
3124 
3125 	while (dret == dpcd_bytes_to_read &&
3126 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
3129 
3130 		process_count++;
3131 
3132 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3133 		/* handle HPD short pulse irq */
3134 		if (aconnector->mst_mgr.mst_state)
3135 			drm_dp_mst_hpd_irq(
3136 				&aconnector->mst_mgr,
3137 				esi,
3138 				&new_irq_handled);
3139 
3140 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3142 			const int ack_dpcd_bytes_to_write =
3143 				dpcd_bytes_to_read - 1;
3144 
3145 			for (retry = 0; retry < 3; retry++) {
3146 				uint8_t wret;
3147 
3148 				wret = drm_dp_dpcd_write(
3149 					&aconnector->dm_dp_aux.aux,
3150 					dpcd_addr + 1,
3151 					&esi[1],
3152 					ack_dpcd_bytes_to_write);
3153 				if (wret == ack_dpcd_bytes_to_write)
3154 					break;
3155 			}
3156 
3157 			/* check if there is new irq to be handled */
3158 			dret = drm_dp_dpcd_read(
3159 				&aconnector->dm_dp_aux.aux,
3160 				dpcd_addr,
3161 				esi,
3162 				dpcd_bytes_to_read);
3163 
3164 			new_irq_handled = false;
3165 		} else {
3166 			break;
3167 		}
3168 	}
3169 
3170 	if (process_count == max_process_count)
3171 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3172 }
3173 
3174 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3175 							union hpd_irq_data hpd_irq_data)
3176 {
3177 	struct hpd_rx_irq_offload_work *offload_work =
3178 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3179 
3180 	if (!offload_work) {
3181 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3182 		return;
3183 	}
3184 
3185 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3186 	offload_work->data = hpd_irq_data;
3187 	offload_work->offload_wq = offload_wq;
3188 
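	/*
	 * Ownership of offload_work passes to the work handler, which is
	 * expected to free it once the offload has been processed.
	 */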
3189 	queue_work(offload_wq->wq, &offload_work->work);
3190 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3191 }
3192 
3193 static void handle_hpd_rx_irq(void *param)
3194 {
3195 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3196 	struct drm_connector *connector = &aconnector->base;
3197 	struct drm_device *dev = connector->dev;
3198 	struct dc_link *dc_link = aconnector->dc_link;
3199 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3200 	bool result = false;
3201 	enum dc_connection_type new_connection_type = dc_connection_none;
3202 	struct amdgpu_device *adev = drm_to_adev(dev);
3203 	union hpd_irq_data hpd_irq_data;
3204 	bool link_loss = false;
3205 	bool has_left_work = false;
3206 	int idx = aconnector->base.index;
3207 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
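	/*
	 * The hpd_rx offload queues are indexed by DRM connector index,
	 * matching the assignment done in register_hpd_handlers().
	 */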
3208 
3209 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3210 
3211 	if (adev->dm.disable_hpd_irq)
3212 		return;
3213 
	/*
	 * TODO: This mutex is a temporary measure to keep the HPD interrupt
	 * from running into a GPIO conflict; once an i2c helper is
	 * implemented, it should be retired.
	 */
3219 	mutex_lock(&aconnector->hpd_lock);
3220 
3221 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3222 						&link_loss, true, &has_left_work);
3223 
3224 	if (!has_left_work)
3225 		goto out;
3226 
3227 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3228 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3229 		goto out;
3230 	}
3231 
3232 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3233 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3234 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3235 			dm_handle_mst_sideband_msg(aconnector);
3236 			goto out;
3237 		}
3238 
3239 		if (link_loss) {
3240 			bool skip = false;
3241 
3242 			spin_lock(&offload_wq->offload_lock);
3243 			skip = offload_wq->is_handling_link_loss;
3244 
3245 			if (!skip)
3246 				offload_wq->is_handling_link_loss = true;
3247 
3248 			spin_unlock(&offload_wq->offload_lock);
3249 
3250 			if (!skip)
3251 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3252 
3253 			goto out;
3254 		}
3255 	}
3256 
3257 out:
3258 	if (result && !is_mst_root_connector) {
3259 		/* Downstream Port status changed. */
3260 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3261 			DRM_ERROR("KMS: Failed to detect connector\n");
3262 
3263 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3264 			emulated_link_detect(dc_link);
3265 
3266 			if (aconnector->fake_enable)
3267 				aconnector->fake_enable = false;
3268 
3269 			amdgpu_dm_update_connector_after_detect(aconnector);
3270 
3271 
3272 			drm_modeset_lock_all(dev);
3273 			dm_restore_drm_connector_state(dev, connector);
3274 			drm_modeset_unlock_all(dev);
3275 
3276 			drm_kms_helper_connector_hotplug_event(connector);
3277 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3278 
3279 			if (aconnector->fake_enable)
3280 				aconnector->fake_enable = false;
3281 
3282 			amdgpu_dm_update_connector_after_detect(aconnector);
3283 
3284 
3285 			drm_modeset_lock_all(dev);
3286 			dm_restore_drm_connector_state(dev, connector);
3287 			drm_modeset_unlock_all(dev);
3288 
3289 			drm_kms_helper_connector_hotplug_event(connector);
3290 		}
3291 	}
3292 #ifdef CONFIG_DRM_AMD_DC_HDCP
3293 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3294 		if (adev->dm.hdcp_workqueue)
3295 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3296 	}
3297 #endif
3298 
3299 	if (dc_link->type != dc_connection_mst_branch)
3300 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3301 
3302 	mutex_unlock(&aconnector->hpd_lock);
3303 }
3304 
3305 static void register_hpd_handlers(struct amdgpu_device *adev)
3306 {
3307 	struct drm_device *dev = adev_to_drm(adev);
3308 	struct drm_connector *connector;
3309 	struct amdgpu_dm_connector *aconnector;
3310 	const struct dc_link *dc_link;
3311 	struct dc_interrupt_params int_params = {0};
3312 
3313 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3314 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3315 
3316 	list_for_each_entry(connector,
3317 			&dev->mode_config.connector_list, head)	{
3318 
3319 		aconnector = to_amdgpu_dm_connector(connector);
3320 		dc_link = aconnector->dc_link;
3321 
3322 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3323 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3324 			int_params.irq_source = dc_link->irq_source_hpd;
3325 
3326 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3327 					handle_hpd_irq,
3328 					(void *) aconnector);
3329 		}
3330 
3331 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3332 
3333 			/* Also register for DP short pulse (hpd_rx). */
3334 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3335 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3336 
3337 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3338 					handle_hpd_rx_irq,
3339 					(void *) aconnector);
3340 
3341 			if (adev->dm.hpd_rx_offload_wq)
3342 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3343 					aconnector;
3344 		}
3345 	}
3346 }
3347 
3348 #if defined(CONFIG_DRM_AMD_DC_SI)
3349 /* Register IRQ sources and initialize IRQ callbacks */
3350 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3351 {
3352 	struct dc *dc = adev->dm.dc;
3353 	struct common_irq_params *c_irq_params;
3354 	struct dc_interrupt_params int_params = {0};
3355 	int r;
3356 	int i;
3357 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3358 
3359 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3360 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3361 
3362 	/*
3363 	 * Actions of amdgpu_irq_add_id():
3364 	 * 1. Register a set() function with base driver.
3365 	 *    Base driver will call set() function to enable/disable an
3366 	 *    interrupt in DC hardware.
3367 	 * 2. Register amdgpu_dm_irq_handler().
3368 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3369 	 *    coming from DC hardware.
3370 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3371 	 *    for acknowledging and handling. */
3372 
3373 	/* Use VBLANK interrupt */
3374 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3376 		if (r) {
3377 			DRM_ERROR("Failed to add crtc irq id!\n");
3378 			return r;
3379 		}
3380 
3381 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3382 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3384 
3385 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3386 
3387 		c_irq_params->adev = adev;
3388 		c_irq_params->irq_src = int_params.irq_source;
3389 
3390 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3391 				dm_crtc_high_irq, c_irq_params);
3392 	}
3393 
3394 	/* Use GRPH_PFLIP interrupt */
3395 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3396 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3397 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3398 		if (r) {
3399 			DRM_ERROR("Failed to add page flip irq id!\n");
3400 			return r;
3401 		}
3402 
3403 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3404 		int_params.irq_source =
3405 			dc_interrupt_to_irq_source(dc, i, 0);
3406 
3407 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3408 
3409 		c_irq_params->adev = adev;
3410 		c_irq_params->irq_src = int_params.irq_source;
3411 
3412 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3413 				dm_pflip_high_irq, c_irq_params);
3414 
3415 	}
3416 
3417 	/* HPD */
3418 	r = amdgpu_irq_add_id(adev, client_id,
3419 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3420 	if (r) {
3421 		DRM_ERROR("Failed to add hpd irq id!\n");
3422 		return r;
3423 	}
3424 
3425 	register_hpd_handlers(adev);
3426 
3427 	return 0;
3428 }
3429 #endif
3430 
3431 /* Register IRQ sources and initialize IRQ callbacks */
3432 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3433 {
3434 	struct dc *dc = adev->dm.dc;
3435 	struct common_irq_params *c_irq_params;
3436 	struct dc_interrupt_params int_params = {0};
3437 	int r;
3438 	int i;
3439 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3440 
3441 	if (adev->family >= AMDGPU_FAMILY_AI)
3442 		client_id = SOC15_IH_CLIENTID_DCE;
3443 
3444 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3445 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3446 
3447 	/*
3448 	 * Actions of amdgpu_irq_add_id():
3449 	 * 1. Register a set() function with base driver.
3450 	 *    Base driver will call set() function to enable/disable an
3451 	 *    interrupt in DC hardware.
3452 	 * 2. Register amdgpu_dm_irq_handler().
3453 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3454 	 *    coming from DC hardware.
3455 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3456 	 *    for acknowledging and handling. */
3457 
3458 	/* Use VBLANK interrupt */
3459 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3460 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3461 		if (r) {
3462 			DRM_ERROR("Failed to add crtc irq id!\n");
3463 			return r;
3464 		}
3465 
3466 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3467 		int_params.irq_source =
3468 			dc_interrupt_to_irq_source(dc, i, 0);
3469 
3470 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3471 
3472 		c_irq_params->adev = adev;
3473 		c_irq_params->irq_src = int_params.irq_source;
3474 
3475 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3476 				dm_crtc_high_irq, c_irq_params);
3477 	}
3478 
3479 	/* Use VUPDATE interrupt */
3480 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3481 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3482 		if (r) {
3483 			DRM_ERROR("Failed to add vupdate irq id!\n");
3484 			return r;
3485 		}
3486 
3487 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3488 		int_params.irq_source =
3489 			dc_interrupt_to_irq_source(dc, i, 0);
3490 
3491 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3492 
3493 		c_irq_params->adev = adev;
3494 		c_irq_params->irq_src = int_params.irq_source;
3495 
3496 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3497 				dm_vupdate_high_irq, c_irq_params);
3498 	}
3499 
3500 	/* Use GRPH_PFLIP interrupt */
3501 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3502 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3503 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3504 		if (r) {
3505 			DRM_ERROR("Failed to add page flip irq id!\n");
3506 			return r;
3507 		}
3508 
3509 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3510 		int_params.irq_source =
3511 			dc_interrupt_to_irq_source(dc, i, 0);
3512 
3513 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3514 
3515 		c_irq_params->adev = adev;
3516 		c_irq_params->irq_src = int_params.irq_source;
3517 
3518 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3519 				dm_pflip_high_irq, c_irq_params);
3520 
3521 	}
3522 
3523 	/* HPD */
3524 	r = amdgpu_irq_add_id(adev, client_id,
3525 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3526 	if (r) {
3527 		DRM_ERROR("Failed to add hpd irq id!\n");
3528 		return r;
3529 	}
3530 
3531 	register_hpd_handlers(adev);
3532 
3533 	return 0;
3534 }
3535 
3536 #if defined(CONFIG_DRM_AMD_DC_DCN)
3537 /* Register IRQ sources and initialize IRQ callbacks */
3538 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3539 {
3540 	struct dc *dc = adev->dm.dc;
3541 	struct common_irq_params *c_irq_params;
3542 	struct dc_interrupt_params int_params = {0};
3543 	int r;
3544 	int i;
3545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3546 	static const unsigned int vrtl_int_srcid[] = {
3547 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3548 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3549 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3550 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3551 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3552 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3553 	};
3554 #endif
3555 
3556 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3557 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3558 
3559 	/*
3560 	 * Actions of amdgpu_irq_add_id():
3561 	 * 1. Register a set() function with base driver.
3562 	 *    Base driver will call set() function to enable/disable an
3563 	 *    interrupt in DC hardware.
3564 	 * 2. Register amdgpu_dm_irq_handler().
3565 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3566 	 *    coming from DC hardware.
3567 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3568 	 *    for acknowledging and handling.
3569 	 */
3570 
3571 	/* Use VSTARTUP interrupt */
3572 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3573 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3574 			i++) {
3575 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3576 
3577 		if (r) {
3578 			DRM_ERROR("Failed to add crtc irq id!\n");
3579 			return r;
3580 		}
3581 
3582 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3583 		int_params.irq_source =
3584 			dc_interrupt_to_irq_source(dc, i, 0);
3585 
3586 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3587 
3588 		c_irq_params->adev = adev;
3589 		c_irq_params->irq_src = int_params.irq_source;
3590 
3591 		amdgpu_dm_irq_register_interrupt(
3592 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3593 	}
3594 
3595 	/* Use otg vertical line interrupt */
3596 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3597 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3598 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3599 				vrtl_int_srcid[i], &adev->vline0_irq);
3600 
3601 		if (r) {
3602 			DRM_ERROR("Failed to add vline0 irq id!\n");
3603 			return r;
3604 		}
3605 
3606 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3607 		int_params.irq_source =
3608 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3609 
3610 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3611 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3612 			break;
3613 		}
3614 
3615 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3616 					- DC_IRQ_SOURCE_DC1_VLINE0];
3617 
3618 		c_irq_params->adev = adev;
3619 		c_irq_params->irq_src = int_params.irq_source;
3620 
3621 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3622 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3623 	}
3624 #endif
3625 
3626 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3627 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3628 	 * to trigger at the end of each vblank, regardless of the state of the lock,
3629 	 * matching DCE behaviour.
3630 	 */
3631 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3632 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3633 	     i++) {
3634 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3635 
3636 		if (r) {
3637 			DRM_ERROR("Failed to add vupdate irq id!\n");
3638 			return r;
3639 		}
3640 
3641 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3642 		int_params.irq_source =
3643 			dc_interrupt_to_irq_source(dc, i, 0);
3644 
3645 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3646 
3647 		c_irq_params->adev = adev;
3648 		c_irq_params->irq_src = int_params.irq_source;
3649 
3650 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3651 				dm_vupdate_high_irq, c_irq_params);
3652 	}
3653 
3654 	/* Use GRPH_PFLIP interrupt */
3655 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3656 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3657 			i++) {
3658 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3659 		if (r) {
3660 			DRM_ERROR("Failed to add page flip irq id!\n");
3661 			return r;
3662 		}
3663 
3664 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3665 		int_params.irq_source =
3666 			dc_interrupt_to_irq_source(dc, i, 0);
3667 
3668 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3669 
3670 		c_irq_params->adev = adev;
3671 		c_irq_params->irq_src = int_params.irq_source;
3672 
3673 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3674 				dm_pflip_high_irq, c_irq_params);
3675 
3676 	}
3677 
3678 	/* HPD */
3679 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3680 			&adev->hpd_irq);
3681 	if (r) {
3682 		DRM_ERROR("Failed to add hpd irq id!\n");
3683 		return r;
3684 	}
3685 
3686 	register_hpd_handlers(adev);
3687 
3688 	return 0;
3689 }
3690 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3691 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3692 {
3693 	struct dc *dc = adev->dm.dc;
3694 	struct common_irq_params *c_irq_params;
3695 	struct dc_interrupt_params int_params = {0};
3696 	int r, i;
3697 
3698 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3699 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3700 
3701 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3702 			&adev->dmub_outbox_irq);
3703 	if (r) {
3704 		DRM_ERROR("Failed to add outbox irq id!\n");
3705 		return r;
3706 	}
3707 
3708 	if (dc->ctx->dmub_srv) {
3709 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3710 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3711 		int_params.irq_source =
3712 		dc_interrupt_to_irq_source(dc, i, 0);
3713 
3714 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3715 
3716 		c_irq_params->adev = adev;
3717 		c_irq_params->irq_src = int_params.irq_source;
3718 
3719 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3720 				dm_dmub_outbox1_low_irq, c_irq_params);
3721 	}
3722 
3723 	return 0;
3724 }
3725 #endif
3726 
3727 /*
3728  * Acquires the lock for the atomic state object and returns
3729  * the new atomic state.
3730  *
3731  * This should only be called during atomic check.
3732  */
3733 int dm_atomic_get_state(struct drm_atomic_state *state,
3734 			struct dm_atomic_state **dm_state)
3735 {
3736 	struct drm_device *dev = state->dev;
3737 	struct amdgpu_device *adev = drm_to_adev(dev);
3738 	struct amdgpu_display_manager *dm = &adev->dm;
3739 	struct drm_private_state *priv_state;
3740 
3741 	if (*dm_state)
3742 		return 0;
3743 
3744 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3745 	if (IS_ERR(priv_state))
3746 		return PTR_ERR(priv_state);
3747 
3748 	*dm_state = to_dm_atomic_state(priv_state);
3749 
3750 	return 0;
3751 }
3752 
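/*
 * Return the DM private state tracked in @state, or NULL if the private
 * object is not part of this atomic state.
 */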
3753 static struct dm_atomic_state *
3754 dm_atomic_get_new_state(struct drm_atomic_state *state)
3755 {
3756 	struct drm_device *dev = state->dev;
3757 	struct amdgpu_device *adev = drm_to_adev(dev);
3758 	struct amdgpu_display_manager *dm = &adev->dm;
3759 	struct drm_private_obj *obj;
3760 	struct drm_private_state *new_obj_state;
3761 	int i;
3762 
3763 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3764 		if (obj->funcs == dm->atomic_obj.funcs)
3765 			return to_dm_atomic_state(new_obj_state);
3766 	}
3767 
3768 	return NULL;
3769 }
3770 
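/*
 * drm_private_state_funcs.atomic_duplicate_state: clone the DM private
 * state, taking a copy of the current DC context via dc_copy_state().
 */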
3771 static struct drm_private_state *
3772 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3773 {
3774 	struct dm_atomic_state *old_state, *new_state;
3775 
3776 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3777 	if (!new_state)
3778 		return NULL;
3779 
3780 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3781 
3782 	old_state = to_dm_atomic_state(obj->state);
3783 
3784 	if (old_state && old_state->context)
3785 		new_state->context = dc_copy_state(old_state->context);
3786 
3787 	if (!new_state->context) {
3788 		kfree(new_state);
3789 		return NULL;
3790 	}
3791 
3792 	return &new_state->base;
3793 }
3794 
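/*
 * drm_private_state_funcs.atomic_destroy_state: release the DC context
 * reference and free the DM private state.
 */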
3795 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3796 				    struct drm_private_state *state)
3797 {
3798 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3799 
3800 	if (dm_state && dm_state->context)
3801 		dc_release_state(dm_state->context);
3802 
3803 	kfree(dm_state);
3804 }
3805 
3806 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3807 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3808 	.atomic_destroy_state = dm_atomic_destroy_state,
3809 };
3810 
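/*
 * Initialize DRM mode_config limits and defaults for the device, create the
 * initial DC state and register it as the DM atomic private object, then
 * create the modeset properties and the audio component.
 */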
3811 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3812 {
3813 	struct dm_atomic_state *state;
3814 	int r;
3815 
3816 	adev->mode_info.mode_config_initialized = true;
3817 
3818 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3819 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3820 
3821 	adev_to_drm(adev)->mode_config.max_width = 16384;
3822 	adev_to_drm(adev)->mode_config.max_height = 16384;
3823 
3824 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3825 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3826 	/* indicates support for immediate flip */
3827 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3828 
3829 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3830 
3831 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3832 	if (!state)
3833 		return -ENOMEM;
3834 
3835 	state->context = dc_create_state(adev->dm.dc);
3836 	if (!state->context) {
3837 		kfree(state);
3838 		return -ENOMEM;
3839 	}
3840 
3841 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3842 
3843 	drm_atomic_private_obj_init(adev_to_drm(adev),
3844 				    &adev->dm.atomic_obj,
3845 				    &state->base,
3846 				    &dm_atomic_state_funcs);
3847 
3848 	r = amdgpu_display_modeset_create_props(adev);
3849 	if (r) {
3850 		dc_release_state(state->context);
3851 		kfree(state);
3852 		return r;
3853 	}
3854 
3855 	r = amdgpu_dm_audio_init(adev);
3856 	if (r) {
3857 		dc_release_state(state->context);
3858 		kfree(state);
3859 		return r;
3860 	}
3861 
3862 	return 0;
3863 }
3864 
3865 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3866 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3867 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3868 
3869 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3870 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3871 
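/*
 * Cache backlight capabilities for the given eDP index, querying ACPI when
 * available and falling back to the default min/max input signal range.
 */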
3872 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3873 					    int bl_idx)
3874 {
3875 #if defined(CONFIG_ACPI)
3876 	struct amdgpu_dm_backlight_caps caps;
3877 
3878 	memset(&caps, 0, sizeof(caps));
3879 
3880 	if (dm->backlight_caps[bl_idx].caps_valid)
3881 		return;
3882 
3883 	amdgpu_acpi_get_backlight_caps(&caps);
3884 	if (caps.caps_valid) {
3885 		dm->backlight_caps[bl_idx].caps_valid = true;
3886 		if (caps.aux_support)
3887 			return;
3888 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3889 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3890 	} else {
3891 		dm->backlight_caps[bl_idx].min_input_signal =
3892 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3893 		dm->backlight_caps[bl_idx].max_input_signal =
3894 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3895 	}
3896 #else
3897 	if (dm->backlight_caps[bl_idx].aux_support)
3898 		return;
3899 
3900 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3901 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3902 #endif
3903 }
3904 
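/*
 * Fetch the firmware brightness limits in the units DC expects: millinits
 * for AUX-based control, 16-bit PWM counts otherwise. Returns 0 when no
 * caps are available.
 */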
3905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3906 				unsigned int *min, unsigned int *max)
3907 {
3908 	if (!caps)
3909 		return 0;
3910 
3911 	if (caps->aux_support) {
3912 		// Firmware limits are in nits, DC API wants millinits.
3913 		*max = 1000 * caps->aux_max_input_signal;
3914 		*min = 1000 * caps->aux_min_input_signal;
3915 	} else {
3916 		// Firmware limits are 8-bit, PWM control is 16-bit.
3917 		*max = 0x101 * caps->max_input_signal;
3918 		*min = 0x101 * caps->min_input_signal;
3919 	}
3920 	return 1;
3921 }
3922 
3923 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3924 					uint32_t brightness)
3925 {
3926 	unsigned int min, max;
3927 
3928 	if (!get_brightness_range(caps, &min, &max))
3929 		return brightness;
3930 
3931 	// Rescale 0..255 to min..max
3932 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3933 				       AMDGPU_MAX_BL_LEVEL);
3934 }
3935 
3936 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3937 				      uint32_t brightness)
3938 {
3939 	unsigned int min, max;
3940 
3941 	if (!get_brightness_range(caps, &min, &max))
3942 		return brightness;
3943 
3944 	if (brightness < min)
3945 		return 0;
3946 	// Rescale min..max to 0..255
3947 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3948 				 max - min);
3949 }
3950 
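/*
 * Apply a user brightness level to the given eDP link, via AUX (in nits)
 * when the panel supports it and via PWM otherwise; the requested level is
 * cached and mirrored into the BIOS scratch register for link 0.
 */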
3951 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3952 					 int bl_idx,
3953 					 u32 user_brightness)
3954 {
3955 	struct amdgpu_dm_backlight_caps caps;
3956 	struct dc_link *link;
3957 	u32 brightness;
3958 	bool rc;
3959 
3960 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3961 	caps = dm->backlight_caps[bl_idx];
3962 
3963 	dm->brightness[bl_idx] = user_brightness;
3964 	/* update scratch register */
3965 	if (bl_idx == 0)
3966 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3967 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3968 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3969 
3970 	/* Change brightness based on AUX property */
3971 	if (caps.aux_support) {
3972 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3973 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3974 		if (!rc)
3975 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3976 	} else {
3977 		rc = dc_link_set_backlight_level(link, brightness, 0);
3978 		if (!rc)
3979 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3980 	}
3981 
3982 	return rc ? 0 : 1;
3983 }
3984 
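/* backlight_ops.update_status: apply the request to the matching eDP link */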
3985 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3986 {
3987 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3988 	int i;
3989 
3990 	for (i = 0; i < dm->num_of_edps; i++) {
3991 		if (bd == dm->backlight_dev[i])
3992 			break;
3993 	}
3994 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3995 		i = 0;
3996 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3997 
3998 	return 0;
3999 }
4000 
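/*
 * Read the current backlight level from the link (AUX or PWM) and convert
 * it to the user range; falls back to the cached value on failure.
 */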
4001 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4002 					 int bl_idx)
4003 {
4004 	struct amdgpu_dm_backlight_caps caps;
4005 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4006 
4007 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4008 	caps = dm->backlight_caps[bl_idx];
4009 
4010 	if (caps.aux_support) {
4011 		u32 avg, peak;
4012 		bool rc;
4013 
4014 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4015 		if (!rc)
4016 			return dm->brightness[bl_idx];
4017 		return convert_brightness_to_user(&caps, avg);
4018 	} else {
4019 		int ret = dc_link_get_backlight_level(link);
4020 
4021 		if (ret == DC_ERROR_UNEXPECTED)
4022 			return dm->brightness[bl_idx];
4023 		return convert_brightness_to_user(&caps, ret);
4024 	}
4025 }
4026 
4027 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4028 {
4029 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4030 	int i;
4031 
4032 	for (i = 0; i < dm->num_of_edps; i++) {
4033 		if (bd == dm->backlight_dev[i])
4034 			break;
4035 	}
4036 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4037 		i = 0;
4038 	return amdgpu_dm_backlight_get_level(dm, i);
4039 }
4040 
4041 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4042 	.options = BL_CORE_SUSPENDRESUME,
4043 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4044 	.update_status	= amdgpu_dm_backlight_update_status,
4045 };
4046 
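/*
 * Register a backlight class device for the next eDP link; failure is only
 * logged since DM can keep running without backlight control.
 */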
4047 static void
4048 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4049 {
4050 	char bl_name[16];
4051 	struct backlight_properties props = { 0 };
4052 
4053 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4054 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4055 
4056 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4057 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4058 	props.type = BACKLIGHT_RAW;
4059 
4060 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4061 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4062 
4063 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4064 								       adev_to_drm(dm->adev)->dev,
4065 								       dm,
4066 								       &amdgpu_dm_backlight_ops,
4067 								       &props);
4068 
4069 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4070 		DRM_ERROR("DM: Backlight registration failed!\n");
4071 	else
4072 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4073 }
4074 #endif
4075 
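/*
 * Allocate and initialize one DRM plane of the requested type and, when a
 * mode_info slot is provided, record it there for later CRTC creation.
 */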
4076 static int initialize_plane(struct amdgpu_display_manager *dm,
4077 			    struct amdgpu_mode_info *mode_info, int plane_id,
4078 			    enum drm_plane_type plane_type,
4079 			    const struct dc_plane_cap *plane_cap)
4080 {
4081 	struct drm_plane *plane;
4082 	unsigned long possible_crtcs;
4083 	int ret = 0;
4084 
4085 	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
4086 	if (!plane) {
4087 		DRM_ERROR("KMS: Failed to allocate plane\n");
4088 		return -ENOMEM;
4089 	}
4090 	plane->type = plane_type;
4091 
4092 	/*
4093 	 * HACK: IGT tests expect that the primary plane for a CRTC
4094 	 * can only have one possible CRTC. Only expose support for
4095 	 * any CRTC if the plane is not going to be used as a primary plane
4096 	 * for a CRTC - like overlay or underlay planes.
4097 	 */
4098 	possible_crtcs = 1 << plane_id;
4099 	if (plane_id >= dm->dc->caps.max_streams)
4100 		possible_crtcs = 0xff;
4101 
4102 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4103 
4104 	if (ret) {
4105 		DRM_ERROR("KMS: Failed to initialize plane\n");
4106 		kfree(plane);
4107 		return ret;
4108 	}
4109 
4110 	if (mode_info)
4111 		mode_info->planes[plane_id] = plane;
4112 
4113 	return ret;
4114 }
4115 
4116 
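/*
 * For eDP/LVDS links with a connected panel, make sure a backlight device
 * exists and bind this link to the next free backlight slot.
 */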
4117 static void register_backlight_device(struct amdgpu_display_manager *dm,
4118 				      struct dc_link *link)
4119 {
4120 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4121 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4122 
4123 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4124 	    link->type != dc_connection_none) {
4125 		/*
4126 		 * Even if registration fails, we should continue with
4127 		 * DM initialization because not having a backlight control
4128 		 * is better than a black screen.
4129 		 */
4130 		if (!dm->backlight_dev[dm->num_of_edps])
4131 			amdgpu_dm_register_backlight_device(dm);
4132 
4133 		if (dm->backlight_dev[dm->num_of_edps]) {
4134 			dm->backlight_link[dm->num_of_edps] = link;
4135 			dm->num_of_edps++;
4136 		}
4137 	}
4138 #endif
4139 }
4140 
4141 
4142 /*
4143  * In this architecture, the association
4144  * connector -> encoder -> crtc
4145  * is not really required. The crtc and connector will hold the
4146  * display_index as an abstraction to use with the DAL component.
4147  *
4148  * Returns 0 on success
4149  */
4150 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4151 {
4152 	struct amdgpu_display_manager *dm = &adev->dm;
4153 	int32_t i;
4154 	struct amdgpu_dm_connector *aconnector = NULL;
4155 	struct amdgpu_encoder *aencoder = NULL;
4156 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4157 	uint32_t link_cnt;
4158 	int32_t primary_planes;
4159 	enum dc_connection_type new_connection_type = dc_connection_none;
4160 	const struct dc_plane_cap *plane;
4161 	bool psr_feature_enabled = false;
4162 
4163 	dm->display_indexes_num = dm->dc->caps.max_streams;
4164 	/* Update the actual number of CRTCs used */
4165 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4166 
4167 	link_cnt = dm->dc->caps.max_links;
4168 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4169 		DRM_ERROR("DM: Failed to initialize mode config\n");
4170 		return -EINVAL;
4171 	}
4172 
4173 	/* There is one primary plane per CRTC */
4174 	primary_planes = dm->dc->caps.max_streams;
4175 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4176 
4177 	/*
4178 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4179 	 * Order is reversed to match iteration order in atomic check.
4180 	 */
4181 	for (i = (primary_planes - 1); i >= 0; i--) {
4182 		plane = &dm->dc->caps.planes[i];
4183 
4184 		if (initialize_plane(dm, mode_info, i,
4185 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4186 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4187 			goto fail;
4188 		}
4189 	}
4190 
4191 	/*
4192 	 * Initialize overlay planes, index starting after primary planes.
4193 	 * These planes have a higher DRM index than the primary planes since
4194 	 * they should be considered as having a higher z-order.
4195 	 * Order is reversed to match iteration order in atomic check.
4196 	 *
4197 	 * Only support DCN for now, and only expose one so we don't encourage
4198 	 * userspace to use up all the pipes.
4199 	 */
4200 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4201 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4202 
4203 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4204 			continue;
4205 
4206 		if (!plane->blends_with_above || !plane->blends_with_below)
4207 			continue;
4208 
4209 		if (!plane->pixel_format_support.argb8888)
4210 			continue;
4211 
4212 		if (initialize_plane(dm, NULL, primary_planes + i,
4213 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4214 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4215 			goto fail;
4216 		}
4217 
4218 		/* Only create one overlay plane. */
4219 		break;
4220 	}
4221 
4222 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4223 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4224 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4225 			goto fail;
4226 		}
4227 
4228 #if defined(CONFIG_DRM_AMD_DC_DCN)
4229 	/* Use Outbox interrupt */
4230 	switch (adev->ip_versions[DCE_HWIP][0]) {
4231 	case IP_VERSION(3, 0, 0):
4232 	case IP_VERSION(3, 1, 2):
4233 	case IP_VERSION(3, 1, 3):
4234 	case IP_VERSION(3, 1, 6):
4235 	case IP_VERSION(2, 1, 0):
4236 		if (register_outbox_irq_handlers(dm->adev)) {
4237 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4238 			goto fail;
4239 		}
4240 		break;
4241 	default:
4242 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4243 			      adev->ip_versions[DCE_HWIP][0]);
4244 	}
4245 
4246 	/* Determine whether to enable PSR support by default. */
4247 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4248 		switch (adev->ip_versions[DCE_HWIP][0]) {
4249 		case IP_VERSION(3, 1, 2):
4250 		case IP_VERSION(3, 1, 3):
4251 		case IP_VERSION(3, 1, 6):
4252 			psr_feature_enabled = true;
4253 			break;
4254 		default:
4255 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4256 			break;
4257 		}
4258 	}
4259 #endif
4260 
4261 	/* Disable vblank IRQs aggressively for power-saving. */
4262 	adev_to_drm(adev)->vblank_disable_immediate = true;
4263 
4264 	/* loops over all connectors on the board */
4265 	for (i = 0; i < link_cnt; i++) {
4266 		struct dc_link *link = NULL;
4267 
4268 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4269 			DRM_ERROR(
4270 				"KMS: Cannot support more than %d display indexes\n",
4271 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4272 			continue;
4273 		}
4274 
4275 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4276 		if (!aconnector)
4277 			goto fail;
4278 
4279 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4280 		if (!aencoder)
4281 			goto fail;
4282 
4283 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4284 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4285 			goto fail;
4286 		}
4287 
4288 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4289 			DRM_ERROR("KMS: Failed to initialize connector\n");
4290 			goto fail;
4291 		}
4292 
4293 		link = dc_get_link_at_index(dm->dc, i);
4294 
4295 		if (!dc_link_detect_sink(link, &new_connection_type))
4296 			DRM_ERROR("KMS: Failed to detect connector\n");
4297 
4298 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4299 			emulated_link_detect(link);
4300 			amdgpu_dm_update_connector_after_detect(aconnector);
4301 
4302 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4303 			amdgpu_dm_update_connector_after_detect(aconnector);
4304 			register_backlight_device(dm, link);
4305 			if (dm->num_of_edps)
4306 				update_connector_ext_caps(aconnector);
4307 			if (psr_feature_enabled)
4308 				amdgpu_dm_set_psr_caps(link);
4309 
4310 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4311 			 * PSR is also supported.
4312 			 */
4313 			if (link->psr_settings.psr_feature_enabled)
4314 				adev_to_drm(adev)->vblank_disable_immediate = false;
4315 		}
4316 
4317 
4318 	}
4319 
4320 	/* Software is initialized. Now we can register interrupt handlers. */
4321 	switch (adev->asic_type) {
4322 #if defined(CONFIG_DRM_AMD_DC_SI)
4323 	case CHIP_TAHITI:
4324 	case CHIP_PITCAIRN:
4325 	case CHIP_VERDE:
4326 	case CHIP_OLAND:
4327 		if (dce60_register_irq_handlers(dm->adev)) {
4328 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4329 			goto fail;
4330 		}
4331 		break;
4332 #endif
4333 	case CHIP_BONAIRE:
4334 	case CHIP_HAWAII:
4335 	case CHIP_KAVERI:
4336 	case CHIP_KABINI:
4337 	case CHIP_MULLINS:
4338 	case CHIP_TONGA:
4339 	case CHIP_FIJI:
4340 	case CHIP_CARRIZO:
4341 	case CHIP_STONEY:
4342 	case CHIP_POLARIS11:
4343 	case CHIP_POLARIS10:
4344 	case CHIP_POLARIS12:
4345 	case CHIP_VEGAM:
4346 	case CHIP_VEGA10:
4347 	case CHIP_VEGA12:
4348 	case CHIP_VEGA20:
4349 		if (dce110_register_irq_handlers(dm->adev)) {
4350 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4351 			goto fail;
4352 		}
4353 		break;
4354 	default:
4355 #if defined(CONFIG_DRM_AMD_DC_DCN)
4356 		switch (adev->ip_versions[DCE_HWIP][0]) {
4357 		case IP_VERSION(1, 0, 0):
4358 		case IP_VERSION(1, 0, 1):
4359 		case IP_VERSION(2, 0, 2):
4360 		case IP_VERSION(2, 0, 3):
4361 		case IP_VERSION(2, 0, 0):
4362 		case IP_VERSION(2, 1, 0):
4363 		case IP_VERSION(3, 0, 0):
4364 		case IP_VERSION(3, 0, 2):
4365 		case IP_VERSION(3, 0, 3):
4366 		case IP_VERSION(3, 0, 1):
4367 		case IP_VERSION(3, 1, 2):
4368 		case IP_VERSION(3, 1, 3):
4369 		case IP_VERSION(3, 1, 6):
4370 			if (dcn10_register_irq_handlers(dm->adev)) {
4371 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4372 				goto fail;
4373 			}
4374 			break;
4375 		default:
4376 			DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4377 					adev->ip_versions[DCE_HWIP][0]);
4378 			goto fail;
4379 		}
4380 #endif
4381 		break;
4382 	}
4383 
4384 	return 0;
4385 fail:
4386 	kfree(aencoder);
4387 	kfree(aconnector);
4388 
4389 	return -EINVAL;
4390 }
4391 
4392 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4393 {
4394 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4396 }
4397 
4398 /******************************************************************************
4399  * amdgpu_display_funcs functions
4400  *****************************************************************************/
4401 
4402 /*
4403  * dm_bandwidth_update - program display watermarks
4404  *
4405  * @adev: amdgpu_device pointer
4406  *
4407  * Calculate and program the display watermarks and line buffer allocation.
4408  */
4409 static void dm_bandwidth_update(struct amdgpu_device *adev)
4410 {
4411 	/* TODO: implement later */
4412 }
4413 
4414 static const struct amdgpu_display_funcs dm_display_funcs = {
4415 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4416 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4417 	.backlight_set_level = NULL, /* never called for DC */
4418 	.backlight_get_level = NULL, /* never called for DC */
4419 	.hpd_sense = NULL, /* called unconditionally */
4420 	.hpd_set_polarity = NULL, /* called unconditionally */
4421 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4422 	.page_flip_get_scanoutpos =
4423 		dm_crtc_get_scanoutpos, /* called unconditionally */
4424 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4425 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4426 };
4427 
4428 #if defined(CONFIG_DEBUG_KERNEL_DC)
4429 
4430 static ssize_t s3_debug_store(struct device *device,
4431 			      struct device_attribute *attr,
4432 			      const char *buf,
4433 			      size_t count)
4434 {
4435 	int ret;
4436 	int s3_state;
4437 	struct drm_device *drm_dev = dev_get_drvdata(device);
4438 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4439 
4440 	ret = kstrtoint(buf, 0, &s3_state);
4441 
4442 	if (ret == 0) {
4443 		if (s3_state) {
4444 			dm_resume(adev);
4445 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4446 		} else
4447 			dm_suspend(adev);
4448 	}
4449 
4450 	return ret == 0 ? count : 0;
4451 }
4452 
4453 DEVICE_ATTR_WO(s3_debug);
4454 
4455 #endif
4456 
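/*
 * Early IP init: set the per-ASIC CRTC/HPD/DIG counts and install the DM
 * display and IRQ function tables.
 */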
4457 static int dm_early_init(void *handle)
4458 {
4459 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4460 
4461 	switch (adev->asic_type) {
4462 #if defined(CONFIG_DRM_AMD_DC_SI)
4463 	case CHIP_TAHITI:
4464 	case CHIP_PITCAIRN:
4465 	case CHIP_VERDE:
4466 		adev->mode_info.num_crtc = 6;
4467 		adev->mode_info.num_hpd = 6;
4468 		adev->mode_info.num_dig = 6;
4469 		break;
4470 	case CHIP_OLAND:
4471 		adev->mode_info.num_crtc = 2;
4472 		adev->mode_info.num_hpd = 2;
4473 		adev->mode_info.num_dig = 2;
4474 		break;
4475 #endif
4476 	case CHIP_BONAIRE:
4477 	case CHIP_HAWAII:
4478 		adev->mode_info.num_crtc = 6;
4479 		adev->mode_info.num_hpd = 6;
4480 		adev->mode_info.num_dig = 6;
4481 		break;
4482 	case CHIP_KAVERI:
4483 		adev->mode_info.num_crtc = 4;
4484 		adev->mode_info.num_hpd = 6;
4485 		adev->mode_info.num_dig = 7;
4486 		break;
4487 	case CHIP_KABINI:
4488 	case CHIP_MULLINS:
4489 		adev->mode_info.num_crtc = 2;
4490 		adev->mode_info.num_hpd = 6;
4491 		adev->mode_info.num_dig = 6;
4492 		break;
4493 	case CHIP_FIJI:
4494 	case CHIP_TONGA:
4495 		adev->mode_info.num_crtc = 6;
4496 		adev->mode_info.num_hpd = 6;
4497 		adev->mode_info.num_dig = 7;
4498 		break;
4499 	case CHIP_CARRIZO:
4500 		adev->mode_info.num_crtc = 3;
4501 		adev->mode_info.num_hpd = 6;
4502 		adev->mode_info.num_dig = 9;
4503 		break;
4504 	case CHIP_STONEY:
4505 		adev->mode_info.num_crtc = 2;
4506 		adev->mode_info.num_hpd = 6;
4507 		adev->mode_info.num_dig = 9;
4508 		break;
4509 	case CHIP_POLARIS11:
4510 	case CHIP_POLARIS12:
4511 		adev->mode_info.num_crtc = 5;
4512 		adev->mode_info.num_hpd = 5;
4513 		adev->mode_info.num_dig = 5;
4514 		break;
4515 	case CHIP_POLARIS10:
4516 	case CHIP_VEGAM:
4517 		adev->mode_info.num_crtc = 6;
4518 		adev->mode_info.num_hpd = 6;
4519 		adev->mode_info.num_dig = 6;
4520 		break;
4521 	case CHIP_VEGA10:
4522 	case CHIP_VEGA12:
4523 	case CHIP_VEGA20:
4524 		adev->mode_info.num_crtc = 6;
4525 		adev->mode_info.num_hpd = 6;
4526 		adev->mode_info.num_dig = 6;
4527 		break;
4528 	default:
4529 #if defined(CONFIG_DRM_AMD_DC_DCN)
4530 		switch (adev->ip_versions[DCE_HWIP][0]) {
4531 		case IP_VERSION(2, 0, 2):
4532 		case IP_VERSION(3, 0, 0):
4533 			adev->mode_info.num_crtc = 6;
4534 			adev->mode_info.num_hpd = 6;
4535 			adev->mode_info.num_dig = 6;
4536 			break;
4537 		case IP_VERSION(2, 0, 0):
4538 		case IP_VERSION(3, 0, 2):
4539 			adev->mode_info.num_crtc = 5;
4540 			adev->mode_info.num_hpd = 5;
4541 			adev->mode_info.num_dig = 5;
4542 			break;
4543 		case IP_VERSION(2, 0, 3):
4544 		case IP_VERSION(3, 0, 3):
4545 			adev->mode_info.num_crtc = 2;
4546 			adev->mode_info.num_hpd = 2;
4547 			adev->mode_info.num_dig = 2;
4548 			break;
4549 		case IP_VERSION(1, 0, 0):
4550 		case IP_VERSION(1, 0, 1):
4551 		case IP_VERSION(3, 0, 1):
4552 		case IP_VERSION(2, 1, 0):
4553 		case IP_VERSION(3, 1, 2):
4554 		case IP_VERSION(3, 1, 3):
4555 		case IP_VERSION(3, 1, 6):
4556 			adev->mode_info.num_crtc = 4;
4557 			adev->mode_info.num_hpd = 4;
4558 			adev->mode_info.num_dig = 4;
4559 			break;
4560 		default:
4561 			DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4562 					adev->ip_versions[DCE_HWIP][0]);
4563 			return -EINVAL;
4564 		}
4565 #endif
4566 		break;
4567 	}
4568 
4569 	amdgpu_dm_set_irq_funcs(adev);
4570 
4571 	if (adev->mode_info.funcs == NULL)
4572 		adev->mode_info.funcs = &dm_display_funcs;
4573 
4574 	/*
4575 	 * Note: Do NOT change adev->audio_endpt_rreg and
4576 	 * adev->audio_endpt_wreg because they are initialised in
4577 	 * amdgpu_device_init()
4578 	 */
4579 #if defined(CONFIG_DEBUG_KERNEL_DC)
4580 	device_create_file(
4581 		adev_to_drm(adev)->dev,
4582 		&dev_attr_s3_debug);
4583 #endif
4584 
4585 	return 0;
4586 }
4587 
4588 static bool modeset_required(struct drm_crtc_state *crtc_state,
4589 			     struct dc_stream_state *new_stream,
4590 			     struct dc_stream_state *old_stream)
4591 {
4592 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4593 }
4594 
4595 static bool modereset_required(struct drm_crtc_state *crtc_state)
4596 {
4597 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4598 }
4599 
4600 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4601 {
4602 	drm_encoder_cleanup(encoder);
4603 	kfree(encoder);
4604 }
4605 
4606 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4607 	.destroy = amdgpu_dm_encoder_destroy,
4608 };
4609 
4610 
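/*
 * Look up the per-format scaling limits from the DC plane caps, in units
 * of 1/1000th of the scaling factor (1000 == 1.0).
 */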
4611 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4612 					 struct drm_framebuffer *fb,
4613 					 int *min_downscale, int *max_upscale)
4614 {
4615 	struct amdgpu_device *adev = drm_to_adev(dev);
4616 	struct dc *dc = adev->dm.dc;
4617 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4618 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4619 
4620 	switch (fb->format->format) {
4621 	case DRM_FORMAT_P010:
4622 	case DRM_FORMAT_NV12:
4623 	case DRM_FORMAT_NV21:
4624 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4625 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4626 		break;
4627 
4628 	case DRM_FORMAT_XRGB16161616F:
4629 	case DRM_FORMAT_ARGB16161616F:
4630 	case DRM_FORMAT_XBGR16161616F:
4631 	case DRM_FORMAT_ABGR16161616F:
4632 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4633 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4634 		break;
4635 
4636 	default:
4637 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4638 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4639 		break;
4640 	}
4641 
4642 	/*
4643 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4644 	 * scaling factor of 1.0 == 1000 units.
4645 	 */
4646 	if (*max_upscale == 1)
4647 		*max_upscale = 1000;
4648 
4649 	if (*min_downscale == 1)
4650 		*min_downscale = 1000;
4651 }
4652 
4653 
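/*
 * Translate a DRM plane state into DC src/dst/clip rectangles and validate
 * the resulting scaling ratios against the per-format plane caps.
 */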
4654 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4655 				const struct drm_plane_state *state,
4656 				struct dc_scaling_info *scaling_info)
4657 {
4658 	int scale_w, scale_h, min_downscale, max_upscale;
4659 
4660 	memset(scaling_info, 0, sizeof(*scaling_info));
4661 
4662 	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
4663 	scaling_info->src_rect.x = state->src_x >> 16;
4664 	scaling_info->src_rect.y = state->src_y >> 16;
4665 
4666 	/*
4667 	 * For reasons we don't (yet) fully understand, a non-zero
4668 	 * src_y coordinate into an NV12 buffer can cause a
4669 	 * system hang on DCN1x.
4670 	 * To avoid hangs (and maybe be overly cautious)
4671 	 * let's reject both non-zero src_x and src_y.
4672 	 *
4673 	 * We currently know of only one use-case to reproduce a
4674 	 * scenario with non-zero src_x and src_y for NV12, which
4675 	 * is to gesture the YouTube Android app into full screen
4676 	 * on ChromeOS.
4677 	 */
4678 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4679 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4680 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4681 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4682 		return -EINVAL;
4683 
4684 	scaling_info->src_rect.width = state->src_w >> 16;
4685 	if (scaling_info->src_rect.width == 0)
4686 		return -EINVAL;
4687 
4688 	scaling_info->src_rect.height = state->src_h >> 16;
4689 	if (scaling_info->src_rect.height == 0)
4690 		return -EINVAL;
4691 
4692 	scaling_info->dst_rect.x = state->crtc_x;
4693 	scaling_info->dst_rect.y = state->crtc_y;
4694 
4695 	if (state->crtc_w == 0)
4696 		return -EINVAL;
4697 
4698 	scaling_info->dst_rect.width = state->crtc_w;
4699 
4700 	if (state->crtc_h == 0)
4701 		return -EINVAL;
4702 
4703 	scaling_info->dst_rect.height = state->crtc_h;
4704 
4705 	/* DRM doesn't specify clipping on destination output. */
4706 	scaling_info->clip_rect = scaling_info->dst_rect;
4707 
4708 	/* Validate scaling per-format with DC plane caps */
4709 	if (state->plane && state->plane->dev && state->fb) {
4710 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4711 					     &min_downscale, &max_upscale);
4712 	} else {
4713 		min_downscale = 250;
4714 		max_upscale = 16000;
4715 	}
4716 
4717 	scale_w = scaling_info->dst_rect.width * 1000 /
4718 		  scaling_info->src_rect.width;
4719 
4720 	if (scale_w < min_downscale || scale_w > max_upscale)
4721 		return -EINVAL;
4722 
4723 	scale_h = scaling_info->dst_rect.height * 1000 /
4724 		  scaling_info->src_rect.height;
4725 
4726 	if (scale_h < min_downscale || scale_h > max_upscale)
4727 		return -EINVAL;
4728 
4729 	/*
4730 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4731 	 * assume reasonable defaults based on the format.
4732 	 */
4733 
4734 	return 0;
4735 }
4736 
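/* Decode legacy GFX8 tiling flags from BO metadata into DC tiling info */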
4737 static void
4738 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4739 				 uint64_t tiling_flags)
4740 {
4741 	/* Fill GFX8 params */
4742 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4743 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4744 
4745 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4746 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4747 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4748 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4749 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4750 
4751 		/* XXX fix me for VI */
4752 		tiling_info->gfx8.num_banks = num_banks;
4753 		tiling_info->gfx8.array_mode =
4754 				DC_ARRAY_2D_TILED_THIN1;
4755 		tiling_info->gfx8.tile_split = tile_split;
4756 		tiling_info->gfx8.bank_width = bankw;
4757 		tiling_info->gfx8.bank_height = bankh;
4758 		tiling_info->gfx8.tile_aspect = mtaspect;
4759 		tiling_info->gfx8.tile_mode =
4760 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4761 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4762 			== DC_ARRAY_1D_TILED_THIN1) {
4763 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4764 	}
4765 
4766 	tiling_info->gfx8.pipe_config =
4767 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4768 }
4769 
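/* Seed GFX9+ tiling info from the device's gb_addr_config fields */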
4770 static void
4771 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4772 				  union dc_tiling_info *tiling_info)
4773 {
4774 	tiling_info->gfx9.num_pipes =
4775 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4776 	tiling_info->gfx9.num_banks =
4777 		adev->gfx.config.gb_addr_config_fields.num_banks;
4778 	tiling_info->gfx9.pipe_interleave =
4779 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4780 	tiling_info->gfx9.num_shader_engines =
4781 		adev->gfx.config.gb_addr_config_fields.num_se;
4782 	tiling_info->gfx9.max_compressed_frags =
4783 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4784 	tiling_info->gfx9.num_rb_per_se =
4785 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4786 	tiling_info->gfx9.shaderEnable = 1;
4787 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4788 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4789 }
4790 
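/*
 * Ask DC whether the surface supports the requested DCC configuration;
 * returns -EINVAL when DCC is enabled but cannot be used.
 */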
4791 static int
4792 validate_dcc(struct amdgpu_device *adev,
4793 	     const enum surface_pixel_format format,
4794 	     const enum dc_rotation_angle rotation,
4795 	     const union dc_tiling_info *tiling_info,
4796 	     const struct dc_plane_dcc_param *dcc,
4797 	     const struct dc_plane_address *address,
4798 	     const struct plane_size *plane_size)
4799 {
4800 	struct dc *dc = adev->dm.dc;
4801 	struct dc_dcc_surface_param input;
4802 	struct dc_surface_dcc_cap output;
4803 
4804 	memset(&input, 0, sizeof(input));
4805 	memset(&output, 0, sizeof(output));
4806 
4807 	if (!dcc->enable)
4808 		return 0;
4809 
4810 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4811 	    !dc->cap_funcs.get_dcc_compression_cap)
4812 		return -EINVAL;
4813 
4814 	input.format = format;
4815 	input.surface_size.width = plane_size->surface_size.width;
4816 	input.surface_size.height = plane_size->surface_size.height;
4817 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4818 
4819 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4820 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4821 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4822 		input.scan = SCAN_DIRECTION_VERTICAL;
4823 
4824 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4825 		return -EINVAL;
4826 
4827 	if (!output.capable)
4828 		return -EINVAL;
4829 
4830 	if (dcc->independent_64b_blks == 0 &&
4831 	    output.grph.rgb.independent_64b_blks != 0)
4832 		return -EINVAL;
4833 
4834 	return 0;
4835 }
4836 
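/* True if the format modifier is an AMD modifier with DCC enabled */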
4837 static bool
4838 modifier_has_dcc(uint64_t modifier)
4839 {
4840 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4841 }
4842 
4843 static unsigned int
4844 modifier_gfx9_swizzle_mode(uint64_t modifier)
4845 {
4846 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4847 		return 0;
4848 
4849 	return AMD_FMT_MOD_GET(TILE, modifier);
4850 }
4851 
4852 static const struct drm_format_info *
4853 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4854 {
4855 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4856 }
4857 
4858 static void
4859 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4860 				    union dc_tiling_info *tiling_info,
4861 				    uint64_t modifier)
4862 {
4863 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4864 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4865 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4866 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4867 
4868 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4869 
4870 	if (!IS_AMD_FMT_MOD(modifier))
4871 		return;
4872 
4873 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4874 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4875 
4876 	if (adev->family >= AMDGPU_FAMILY_NV) {
4877 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4878 	} else {
4879 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4880 
4881 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4882 	}
4883 }
4884 
4885 enum dm_micro_swizzle {
4886 	MICRO_SWIZZLE_Z = 0,
4887 	MICRO_SWIZZLE_S = 1,
4888 	MICRO_SWIZZLE_D = 2,
4889 	MICRO_SWIZZLE_R = 3
4890 };
4891 
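/*
 * drm_plane_funcs.format_mod_supported: accept LINEAR/INVALID, then check
 * the plane's modifier list plus per-family swizzle and DCC restrictions.
 */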
4892 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4893 					  uint32_t format,
4894 					  uint64_t modifier)
4895 {
4896 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4897 	const struct drm_format_info *info = drm_format_info(format);
4898 	int i;
4899 
4900 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4901 
4902 	if (!info)
4903 		return false;
4904 
4905 	/*
4906 	 * We always have to allow these modifiers:
4907 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4908 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4909 	 */
4910 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4911 	    modifier == DRM_FORMAT_MOD_INVALID) {
4912 		return true;
4913 	}
4914 
4915 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4916 	for (i = 0; i < plane->modifier_count; i++) {
4917 		if (modifier == plane->modifiers[i])
4918 			break;
4919 	}
4920 	if (i == plane->modifier_count)
4921 		return false;
4922 
4923 	/*
4924 	 * For D swizzle the canonical modifier depends on the bpp, so check
4925 	 * it here.
4926 	 */
4927 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4928 	    adev->family >= AMDGPU_FAMILY_NV) {
4929 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4930 			return false;
4931 	}
4932 
4933 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4934 	    info->cpp[0] < 8)
4935 		return false;
4936 
4937 	if (modifier_has_dcc(modifier)) {
4938 		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4939 		if (info->cpp[0] != 4)
4940 			return false;
4941 		/* We support multi-planar formats, but not when combined with
4942 		 * additional DCC metadata planes. */
4943 		if (info->num_planes > 1)
4944 			return false;
4945 	}
4946 
4947 	return true;
4948 }
4949 
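/* Append a modifier to the growable list, doubling its capacity as needed */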
4950 static void
4951 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4952 {
4953 	if (!*mods)
4954 		return;
4955 
4956 	if (*cap - *size < 1) {
4957 		uint64_t new_cap = *cap * 2;
4958 		uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4959 
4960 		if (!new_mods) {
4961 			kfree(*mods);
4962 			*mods = NULL;
4963 			return;
4964 		}
4965 
4966 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4967 		kfree(*mods);
4968 		*mods = new_mods;
4969 		*cap = new_cap;
4970 	}
4971 
4972 	(*mods)[*size] = mod;
4973 	*size += 1;
4974 }
4975 
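/*
 * Build the set of format modifiers advertised for GFX9 ASICs; DCC-capable
 * modifiers are only added for the Raven family.
 */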
4976 static void
4977 add_gfx9_modifiers(const struct amdgpu_device *adev,
4978 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4979 {
4980 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4981 	int pipe_xor_bits = min(8, pipes +
4982 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4983 	int bank_xor_bits = min(8 - pipe_xor_bits,
4984 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4985 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4986 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4987 
4988 
4989 	if (adev->family == AMDGPU_FAMILY_RV) {
4990 		/* Raven2 and later */
4991 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4992 
4993 		/*
4994 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4995 		 * doesn't support _D on DCN
4996 		 */
4997 
4998 		if (has_constant_encode) {
4999 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5001 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5002 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5004 				    AMD_FMT_MOD_SET(DCC, 1) |
5005 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5006 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5007 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5008 		}
5009 
5010 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5011 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5012 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5013 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5014 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5015 			    AMD_FMT_MOD_SET(DCC, 1) |
5016 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5017 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5018 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5019 
5020 		if (has_constant_encode) {
5021 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5022 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5023 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5024 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5025 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5026 				    AMD_FMT_MOD_SET(DCC, 1) |
5027 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5028 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5029 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5030 
5031 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5032 				    AMD_FMT_MOD_SET(RB, rb) |
5033 				    AMD_FMT_MOD_SET(PIPE, pipes));
5034 		}
5035 
5036 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5037 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5038 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5039 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5040 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5041 			    AMD_FMT_MOD_SET(DCC, 1) |
5042 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5043 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5044 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5045 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5046 			    AMD_FMT_MOD_SET(RB, rb) |
5047 			    AMD_FMT_MOD_SET(PIPE, pipes));
5048 	}
5049 
5050 	/*
5051 	 * Only supported for 64bpp on Raven, will be filtered on format in
5052 	 * dm_plane_format_mod_supported.
5053 	 */
5054 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5055 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5056 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5057 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5058 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5059 
5060 	if (adev->family == AMDGPU_FAMILY_RV) {
5061 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5062 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5063 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5064 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5065 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5066 	}
5067 
5068 	/*
5069 	 * Only supported for 64bpp on Raven, will be filtered on format in
5070 	 * dm_plane_format_mod_supported.
5071 	 */
5072 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5073 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5074 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5075 
5076 	if (adev->family == AMDGPU_FAMILY_RV) {
5077 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5078 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5079 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5080 	}
5081 }
5082 
5083 static void
5084 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5085 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5086 {
5087 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5088 
5089 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5090 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5091 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5092 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5093 		    AMD_FMT_MOD_SET(DCC, 1) |
5094 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5095 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5096 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5097 
5098 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5099 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5100 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5101 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5102 		    AMD_FMT_MOD_SET(DCC, 1) |
5103 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5104 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5105 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5106 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5107 
5108 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5110 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5111 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5112 
5113 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5115 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5116 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5117 
5118 
5119 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5120 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5121 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5122 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5123 
5124 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5126 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5127 }
5128 
5129 static void
5130 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5131 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5132 {
5133 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5134 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5135 
5136 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5137 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5138 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5139 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5140 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5141 		    AMD_FMT_MOD_SET(DCC, 1) |
5142 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5143 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5144 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5145 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5146 
5147 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5148 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5149 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5150 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5151 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5152 		    AMD_FMT_MOD_SET(DCC, 1) |
5153 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5154 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5155 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5156 
5157 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5159 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5160 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5161 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5162 		    AMD_FMT_MOD_SET(DCC, 1) |
5163 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5164 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5165 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5166 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5167 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5168 
5169 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5170 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5171 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5172 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5173 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5174 		    AMD_FMT_MOD_SET(DCC, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5178 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5179 
5180 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5181 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5182 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5183 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5184 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5185 
5186 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5187 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5188 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5189 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5190 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5191 
5192 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5193 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5194 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5195 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5196 
5197 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5198 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5199 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5200 }
5201 
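/*
 * Build the list of format modifiers advertised for a plane. Cursor planes
 * only get LINEAR; other planes get the per-GFX-family sets built above,
 * followed by LINEAR, with DRM_FORMAT_MOD_INVALID appended as the
 * end-of-list sentinel.
 */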
5202 static int
5203 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5204 {
5205 	uint64_t size = 0, capacity = 128;
5206 	*mods = NULL;
5207 
5208 	/* We have not hooked up any pre-GFX9 modifiers. */
5209 	if (adev->family < AMDGPU_FAMILY_AI)
5210 		return 0;
5211 
5212 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5213 
5214 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5215 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5216 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5217 		return *mods ? 0 : -ENOMEM;
5218 	}
5219 
5220 	switch (adev->family) {
5221 	case AMDGPU_FAMILY_AI:
5222 	case AMDGPU_FAMILY_RV:
5223 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5224 		break;
5225 	case AMDGPU_FAMILY_NV:
5226 	case AMDGPU_FAMILY_VGH:
5227 	case AMDGPU_FAMILY_YC:
5228 	case AMDGPU_FAMILY_GC_10_3_7:
5229 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5230 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5231 		else
5232 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5233 		break;
5234 	}
5235 
5236 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5237 
5238 	/* INVALID marks the end of the list. */
5239 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5240 
5241 	if (!*mods)
5242 		return -ENOMEM;
5243 
5244 	return 0;
5245 }
5246 
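/*
 * Derive GFX9+ tiling and DCC parameters from the framebuffer's format
 * modifier. When the modifier carries DCC, the metadata surface comes from
 * plane 1 of the framebuffer (offsets[1]/pitches[1]) and the resulting
 * configuration is checked with validate_dcc().
 */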
5247 static int
5248 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5249 					  const struct amdgpu_framebuffer *afb,
5250 					  const enum surface_pixel_format format,
5251 					  const enum dc_rotation_angle rotation,
5252 					  const struct plane_size *plane_size,
5253 					  union dc_tiling_info *tiling_info,
5254 					  struct dc_plane_dcc_param *dcc,
5255 					  struct dc_plane_address *address,
5256 					  const bool force_disable_dcc)
5257 {
5258 	const uint64_t modifier = afb->base.modifier;
5259 	int ret = 0;
5260 
5261 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5262 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5263 
5264 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5265 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5266 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5267 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5268 
5269 		dcc->enable = 1;
5270 		dcc->meta_pitch = afb->base.pitches[1];
5271 		dcc->independent_64b_blks = independent_64b_blks;
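		/*
		 * Translate the modifier's independent-block flags into the
		 * HUBP indirect-block setting; GFX10.3 (RB+) distinguishes
		 * 64B-only, 128B-only and combined 64B+128B layouts.
		 */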
5272 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5273 			if (independent_64b_blks && independent_128b_blks)
5274 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5275 			else if (independent_128b_blks)
5276 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5277 			else if (independent_64b_blks && !independent_128b_blks)
5278 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5279 			else
5280 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5281 		} else {
5282 			if (independent_64b_blks)
5283 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5284 			else
5285 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5286 		}
5287 
5288 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5289 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5290 	}
5291 
5292 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5293 	if (ret)
5294 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5295 
5296 	return ret;
5297 }
5298 
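/*
 * Fill the DC buffer attributes (surface size, pitch, tiling, DCC and
 * scanout addresses) for a framebuffer. RGB formats use a single graphics
 * surface, while YUV formats program separate luma/chroma planes taken
 * from the framebuffer's first two planes.
 */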
5299 static int
5300 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5301 			     const struct amdgpu_framebuffer *afb,
5302 			     const enum surface_pixel_format format,
5303 			     const enum dc_rotation_angle rotation,
5304 			     const uint64_t tiling_flags,
5305 			     union dc_tiling_info *tiling_info,
5306 			     struct plane_size *plane_size,
5307 			     struct dc_plane_dcc_param *dcc,
5308 			     struct dc_plane_address *address,
5309 			     bool tmz_surface,
5310 			     bool force_disable_dcc)
5311 {
5312 	const struct drm_framebuffer *fb = &afb->base;
5313 	int ret;
5314 
5315 	memset(tiling_info, 0, sizeof(*tiling_info));
5316 	memset(plane_size, 0, sizeof(*plane_size));
5317 	memset(dcc, 0, sizeof(*dcc));
5318 	memset(address, 0, sizeof(*address));
5319 
5320 	address->tmz_surface = tmz_surface;
5321 
5322 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5323 		uint64_t addr = afb->address + fb->offsets[0];
5324 
5325 		plane_size->surface_size.x = 0;
5326 		plane_size->surface_size.y = 0;
5327 		plane_size->surface_size.width = fb->width;
5328 		plane_size->surface_size.height = fb->height;
5329 		plane_size->surface_pitch =
5330 			fb->pitches[0] / fb->format->cpp[0];
5331 
5332 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5333 		address->grph.addr.low_part = lower_32_bits(addr);
5334 		address->grph.addr.high_part = upper_32_bits(addr);
5335 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5336 		uint64_t luma_addr = afb->address + fb->offsets[0];
5337 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5338 
5339 		plane_size->surface_size.x = 0;
5340 		plane_size->surface_size.y = 0;
5341 		plane_size->surface_size.width = fb->width;
5342 		plane_size->surface_size.height = fb->height;
5343 		plane_size->surface_pitch =
5344 			fb->pitches[0] / fb->format->cpp[0];
5345 
5346 		plane_size->chroma_size.x = 0;
5347 		plane_size->chroma_size.y = 0;
5348 		/* TODO: set these based on surface format */
5349 		plane_size->chroma_size.width = fb->width / 2;
5350 		plane_size->chroma_size.height = fb->height / 2;
5351 
5352 		plane_size->chroma_pitch =
5353 			fb->pitches[1] / fb->format->cpp[1];
5354 
5355 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5356 		address->video_progressive.luma_addr.low_part =
5357 			lower_32_bits(luma_addr);
5358 		address->video_progressive.luma_addr.high_part =
5359 			upper_32_bits(luma_addr);
5360 		address->video_progressive.chroma_addr.low_part =
5361 			lower_32_bits(chroma_addr);
5362 		address->video_progressive.chroma_addr.high_part =
5363 			upper_32_bits(chroma_addr);
5364 	}
5365 
5366 	if (adev->family >= AMDGPU_FAMILY_AI) {
5367 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5368 								rotation, plane_size,
5369 								tiling_info, dcc,
5370 								address,
5371 								force_disable_dcc);
5372 		if (ret)
5373 			return ret;
5374 	} else {
5375 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5376 	}
5377 
5378 	return 0;
5379 }
5380 
5381 static void
5382 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5383 			       bool *per_pixel_alpha, bool *global_alpha,
5384 			       int *global_alpha_value)
5385 {
5386 	*per_pixel_alpha = false;
5387 	*global_alpha = false;
5388 	*global_alpha_value = 0xff;
5389 
5390 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5391 		return;
5392 
5393 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5394 		static const uint32_t alpha_formats[] = {
5395 			DRM_FORMAT_ARGB8888,
5396 			DRM_FORMAT_RGBA8888,
5397 			DRM_FORMAT_ABGR8888,
5398 		};
5399 		uint32_t format = plane_state->fb->format->format;
5400 		unsigned int i;
5401 
5402 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5403 			if (format == alpha_formats[i]) {
5404 				*per_pixel_alpha = true;
5405 				break;
5406 			}
5407 		}
5408 	}
5409 
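	/*
	 * DRM plane alpha is 16 bit (0xffff is fully opaque) while DC takes an
	 * 8-bit global alpha, so scale it down by dropping the low byte, e.g.
	 * 0x8080 becomes 0x80.
	 */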
5410 	if (plane_state->alpha < 0xffff) {
5411 		*global_alpha = true;
5412 		*global_alpha_value = plane_state->alpha >> 8;
5413 	}
5414 }
5415 
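/*
 * Pick the DC color space for a plane from the DRM COLOR_ENCODING and
 * COLOR_RANGE properties. RGB surfaces always use sRGB; YUV surfaces map
 * BT.601/BT.709 to full- or limited-range YCbCr, and BT.2020 is only
 * accepted as full range.
 */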
5416 static int
5417 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5418 			    const enum surface_pixel_format format,
5419 			    enum dc_color_space *color_space)
5420 {
5421 	bool full_range;
5422 
5423 	*color_space = COLOR_SPACE_SRGB;
5424 
5425 	/* DRM color properties only affect non-RGB formats. */
5426 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5427 		return 0;
5428 
5429 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5430 
5431 	switch (plane_state->color_encoding) {
5432 	case DRM_COLOR_YCBCR_BT601:
5433 		if (full_range)
5434 			*color_space = COLOR_SPACE_YCBCR601;
5435 		else
5436 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5437 		break;
5438 
5439 	case DRM_COLOR_YCBCR_BT709:
5440 		if (full_range)
5441 			*color_space = COLOR_SPACE_YCBCR709;
5442 		else
5443 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5444 		break;
5445 
5446 	case DRM_COLOR_YCBCR_BT2020:
5447 		if (full_range)
5448 			*color_space = COLOR_SPACE_2020_YCBCR;
5449 		else
5450 			return -EINVAL;
5451 		break;
5452 
5453 	default:
5454 		return -EINVAL;
5455 	}
5456 
5457 	return 0;
5458 }
5459 
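/*
 * Translate a DRM plane state (fourcc format, rotation, tiling flags and
 * blending properties) into a dc_plane_info plus the surface address DC
 * will scan out from.
 */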
5460 static int
5461 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5462 			    const struct drm_plane_state *plane_state,
5463 			    const uint64_t tiling_flags,
5464 			    struct dc_plane_info *plane_info,
5465 			    struct dc_plane_address *address,
5466 			    bool tmz_surface,
5467 			    bool force_disable_dcc)
5468 {
5469 	const struct drm_framebuffer *fb = plane_state->fb;
5470 	const struct amdgpu_framebuffer *afb =
5471 		to_amdgpu_framebuffer(plane_state->fb);
5472 	int ret;
5473 
5474 	memset(plane_info, 0, sizeof(*plane_info));
5475 
5476 	switch (fb->format->format) {
5477 	case DRM_FORMAT_C8:
5478 		plane_info->format =
5479 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5480 		break;
5481 	case DRM_FORMAT_RGB565:
5482 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5483 		break;
5484 	case DRM_FORMAT_XRGB8888:
5485 	case DRM_FORMAT_ARGB8888:
5486 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5487 		break;
5488 	case DRM_FORMAT_XRGB2101010:
5489 	case DRM_FORMAT_ARGB2101010:
5490 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5491 		break;
5492 	case DRM_FORMAT_XBGR2101010:
5493 	case DRM_FORMAT_ABGR2101010:
5494 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5495 		break;
5496 	case DRM_FORMAT_XBGR8888:
5497 	case DRM_FORMAT_ABGR8888:
5498 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5499 		break;
5500 	case DRM_FORMAT_NV21:
5501 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5502 		break;
5503 	case DRM_FORMAT_NV12:
5504 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5505 		break;
5506 	case DRM_FORMAT_P010:
5507 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5508 		break;
5509 	case DRM_FORMAT_XRGB16161616F:
5510 	case DRM_FORMAT_ARGB16161616F:
5511 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5512 		break;
5513 	case DRM_FORMAT_XBGR16161616F:
5514 	case DRM_FORMAT_ABGR16161616F:
5515 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5516 		break;
5517 	case DRM_FORMAT_XRGB16161616:
5518 	case DRM_FORMAT_ARGB16161616:
5519 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5520 		break;
5521 	case DRM_FORMAT_XBGR16161616:
5522 	case DRM_FORMAT_ABGR16161616:
5523 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5524 		break;
5525 	default:
5526 		DRM_ERROR(
5527 			"Unsupported screen format %p4cc\n",
5528 			&fb->format->format);
5529 		return -EINVAL;
5530 	}
5531 
5532 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5533 	case DRM_MODE_ROTATE_0:
5534 		plane_info->rotation = ROTATION_ANGLE_0;
5535 		break;
5536 	case DRM_MODE_ROTATE_90:
5537 		plane_info->rotation = ROTATION_ANGLE_90;
5538 		break;
5539 	case DRM_MODE_ROTATE_180:
5540 		plane_info->rotation = ROTATION_ANGLE_180;
5541 		break;
5542 	case DRM_MODE_ROTATE_270:
5543 		plane_info->rotation = ROTATION_ANGLE_270;
5544 		break;
5545 	default:
5546 		plane_info->rotation = ROTATION_ANGLE_0;
5547 		break;
5548 	}
5549 
5550 	plane_info->visible = true;
5551 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5552 
5553 	plane_info->layer_index = 0;
5554 
5555 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5556 					  &plane_info->color_space);
5557 	if (ret)
5558 		return ret;
5559 
5560 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5561 					   plane_info->rotation, tiling_flags,
5562 					   &plane_info->tiling_info,
5563 					   &plane_info->plane_size,
5564 					   &plane_info->dcc, address, tmz_surface,
5565 					   force_disable_dcc);
5566 	if (ret)
5567 		return ret;
5568 
5569 	fill_blending_from_plane_state(
5570 		plane_state, &plane_info->per_pixel_alpha,
5571 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5572 
5573 	return 0;
5574 }
5575 
5576 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5577 				    struct dc_plane_state *dc_plane_state,
5578 				    struct drm_plane_state *plane_state,
5579 				    struct drm_crtc_state *crtc_state)
5580 {
5581 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5582 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5583 	struct dc_scaling_info scaling_info;
5584 	struct dc_plane_info plane_info;
5585 	int ret;
5586 	bool force_disable_dcc = false;
5587 
5588 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5589 	if (ret)
5590 		return ret;
5591 
5592 	dc_plane_state->src_rect = scaling_info.src_rect;
5593 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5594 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5595 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5596 
5597 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5598 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5599 					  afb->tiling_flags,
5600 					  &plane_info,
5601 					  &dc_plane_state->address,
5602 					  afb->tmz_surface,
5603 					  force_disable_dcc);
5604 	if (ret)
5605 		return ret;
5606 
5607 	dc_plane_state->format = plane_info.format;
5608 	dc_plane_state->color_space = plane_info.color_space;
5610 	dc_plane_state->plane_size = plane_info.plane_size;
5611 	dc_plane_state->rotation = plane_info.rotation;
5612 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5613 	dc_plane_state->stereo_format = plane_info.stereo_format;
5614 	dc_plane_state->tiling_info = plane_info.tiling_info;
5615 	dc_plane_state->visible = plane_info.visible;
5616 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5617 	dc_plane_state->global_alpha = plane_info.global_alpha;
5618 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5619 	dc_plane_state->dcc = plane_info.dcc;
5620 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5621 	dc_plane_state->flip_int_enabled = true;
5622 
5623 	/*
5624 	 * Always set input transfer function, since plane state is refreshed
5625 	 * every time.
5626 	 */
5627 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5628 	if (ret)
5629 		return ret;
5630 
5631 	return 0;
5632 }
5633 
5634 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5635 					   const struct dm_connector_state *dm_state,
5636 					   struct dc_stream_state *stream)
5637 {
5638 	enum amdgpu_rmx_type rmx_type;
5639 
5640 	struct rect src = { 0 }; /* viewport in composition space */
5641 	struct rect dst = { 0 }; /* stream addressable area */
5642 
5643 	/* no mode. nothing to be done */
5644 	if (!mode)
5645 		return;
5646 
5647 	/* Full screen scaling by default */
5648 	src.width = mode->hdisplay;
5649 	src.height = mode->vdisplay;
5650 	dst.width = stream->timing.h_addressable;
5651 	dst.height = stream->timing.v_addressable;
5652 
5653 	if (dm_state) {
5654 		rmx_type = dm_state->scaling;
5655 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5656 			if (src.width * dst.height <
5657 					src.height * dst.width) {
5658 				/* height needs less upscaling/more downscaling */
5659 				dst.width = src.width *
5660 						dst.height / src.height;
5661 			} else {
5662 				/* width needs less upscaling/more downscaling */
5663 				dst.height = src.height *
5664 						dst.width / src.width;
5665 			}
5666 		} else if (rmx_type == RMX_CENTER) {
5667 			dst = src;
5668 		}
5669 
5670 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5671 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5672 
5673 		if (dm_state->underscan_enable) {
5674 			dst.x += dm_state->underscan_hborder / 2;
5675 			dst.y += dm_state->underscan_vborder / 2;
5676 			dst.width -= dm_state->underscan_hborder;
5677 			dst.height -= dm_state->underscan_vborder;
5678 		}
5679 	}
5680 
5681 	stream->src = src;
5682 	stream->dst = dst;
5683 
5684 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5685 		      dst.x, dst.y, dst.width, dst.height);
5686 
5687 }
5688 
5689 static enum dc_color_depth
5690 convert_color_depth_from_display_info(const struct drm_connector *connector,
5691 				      bool is_y420, int requested_bpc)
5692 {
5693 	uint8_t bpc;
5694 
5695 	if (is_y420) {
5696 		bpc = 8;
5697 
5698 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5699 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5700 			bpc = 16;
5701 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5702 			bpc = 12;
5703 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5704 			bpc = 10;
5705 	} else {
5706 		bpc = (uint8_t)connector->display_info.bpc;
5707 		/* Assume 8 bpc by default if no bpc is specified. */
5708 		bpc = bpc ? bpc : 8;
5709 	}
5710 
5711 	if (requested_bpc > 0) {
5712 		/*
5713 		 * Cap display bpc based on the user requested value.
5714 		 *
5715 		 * The value for state->max_bpc may not be correctly updated
5716 		 * depending on when the connector gets added to the state
5717 		 * or if this was called outside of atomic check, so it
5718 		 * can't be used directly.
5719 		 */
5720 		bpc = min_t(u8, bpc, requested_bpc);
5721 
5722 		/* Round down to the nearest even number. */
5723 		bpc = bpc - (bpc & 1);
5724 	}
5725 
5726 	switch (bpc) {
5727 	case 0:
5728 		/*
5729 		 * Temporary workaround: DRM doesn't parse color depth for
5730 		 * EDID revisions before 1.4.
5731 		 * TODO: Fix EDID parsing
5732 		 */
5733 		return COLOR_DEPTH_888;
5734 	case 6:
5735 		return COLOR_DEPTH_666;
5736 	case 8:
5737 		return COLOR_DEPTH_888;
5738 	case 10:
5739 		return COLOR_DEPTH_101010;
5740 	case 12:
5741 		return COLOR_DEPTH_121212;
5742 	case 14:
5743 		return COLOR_DEPTH_141414;
5744 	case 16:
5745 		return COLOR_DEPTH_161616;
5746 	default:
5747 		return COLOR_DEPTH_UNDEFINED;
5748 	}
5749 }
5750 
5751 static enum dc_aspect_ratio
5752 get_aspect_ratio(const struct drm_display_mode *mode_in)
5753 {
5754 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5755 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5756 }
5757 
5758 static enum dc_color_space
5759 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5760 {
5761 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5762 
5763 	switch (dc_crtc_timing->pixel_encoding)	{
5764 	case PIXEL_ENCODING_YCBCR422:
5765 	case PIXEL_ENCODING_YCBCR444:
5766 	case PIXEL_ENCODING_YCBCR420:
5767 	{
5768 		/*
5769 		 * 27.03 MHz (pix_clk_100hz == 270300) is the separation point
5770 		 * between HDTV and SDTV per the HDMI spec: use YCbCr709 above it
5771 		 * and YCbCr601 below it.
5772 		 */
5773 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5774 			if (dc_crtc_timing->flags.Y_ONLY)
5775 				color_space =
5776 					COLOR_SPACE_YCBCR709_LIMITED;
5777 			else
5778 				color_space = COLOR_SPACE_YCBCR709;
5779 		} else {
5780 			if (dc_crtc_timing->flags.Y_ONLY)
5781 				color_space =
5782 					COLOR_SPACE_YCBCR601_LIMITED;
5783 			else
5784 				color_space = COLOR_SPACE_YCBCR601;
5785 		}
5786 
5787 	}
5788 	break;
5789 	case PIXEL_ENCODING_RGB:
5790 		color_space = COLOR_SPACE_SRGB;
5791 		break;
5792 
5793 	default:
5794 		WARN_ON(1);
5795 		break;
5796 	}
5797 
5798 	return color_space;
5799 }
5800 
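/*
 * Walk down from the current colour depth until the depth-scaled pixel
 * clock fits under the sink's maximum TMDS clock. The clock scales with
 * depth (e.g. 10 bpc multiplies the 8 bpc clock by 30/24, 12 bpc by 36/24)
 * and YCbCr 4:2:0 additionally halves it; returns false if nothing down to
 * 8 bpc fits.
 */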
5801 static bool adjust_colour_depth_from_display_info(
5802 	struct dc_crtc_timing *timing_out,
5803 	const struct drm_display_info *info)
5804 {
5805 	enum dc_color_depth depth = timing_out->display_color_depth;
5806 	int normalized_clk;
5807 	do {
5808 		normalized_clk = timing_out->pix_clk_100hz / 10;
5809 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5810 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5811 			normalized_clk /= 2;
5812 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5813 		switch (depth) {
5814 		case COLOR_DEPTH_888:
5815 			break;
5816 		case COLOR_DEPTH_101010:
5817 			normalized_clk = (normalized_clk * 30) / 24;
5818 			break;
5819 		case COLOR_DEPTH_121212:
5820 			normalized_clk = (normalized_clk * 36) / 24;
5821 			break;
5822 		case COLOR_DEPTH_161616:
5823 			normalized_clk = (normalized_clk * 48) / 24;
5824 			break;
5825 		default:
5826 			/* The above depths are the only ones valid for HDMI. */
5827 			return false;
5828 		}
5829 		if (normalized_clk <= info->max_tmds_clock) {
5830 			timing_out->display_color_depth = depth;
5831 			return true;
5832 		}
5833 	} while (--depth > COLOR_DEPTH_666);
5834 	return false;
5835 }
5836 
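/*
 * Build the DC CRTC timing for a stream from a DRM display mode: choose the
 * pixel encoding (forcing YCbCr 4:2:0 where the mode or connector requires
 * it), colour depth, VIC/HDMI-VIC and sync polarities, and copy either the
 * base or the CRTC-adjusted timing depending on whether this is a FreeSync
 * video mode.
 */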
5837 static void fill_stream_properties_from_drm_display_mode(
5838 	struct dc_stream_state *stream,
5839 	const struct drm_display_mode *mode_in,
5840 	const struct drm_connector *connector,
5841 	const struct drm_connector_state *connector_state,
5842 	const struct dc_stream_state *old_stream,
5843 	int requested_bpc)
5844 {
5845 	struct dc_crtc_timing *timing_out = &stream->timing;
5846 	const struct drm_display_info *info = &connector->display_info;
5847 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5848 	struct hdmi_vendor_infoframe hv_frame;
5849 	struct hdmi_avi_infoframe avi_frame;
5850 
5851 	memset(&hv_frame, 0, sizeof(hv_frame));
5852 	memset(&avi_frame, 0, sizeof(avi_frame));
5853 
5854 	timing_out->h_border_left = 0;
5855 	timing_out->h_border_right = 0;
5856 	timing_out->v_border_top = 0;
5857 	timing_out->v_border_bottom = 0;
5858 	/* TODO: un-hardcode */
5859 	if (drm_mode_is_420_only(info, mode_in)
5860 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5861 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5862 	else if (drm_mode_is_420_also(info, mode_in)
5863 			&& aconnector->force_yuv420_output)
5864 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5865 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5866 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5867 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5868 	else
5869 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5870 
5871 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5872 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5873 		connector,
5874 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5875 		requested_bpc);
5876 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5877 	timing_out->hdmi_vic = 0;
5878 
5879 	if (old_stream) {
5880 		timing_out->vic = old_stream->timing.vic;
5881 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5882 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5883 	} else {
5884 		timing_out->vic = drm_match_cea_mode(mode_in);
5885 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5886 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5887 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5888 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5889 	}
5890 
5891 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5892 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5893 		timing_out->vic = avi_frame.video_code;
5894 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5895 		timing_out->hdmi_vic = hv_frame.vic;
5896 	}
5897 
5898 	if (is_freesync_video_mode(mode_in, aconnector)) {
5899 		timing_out->h_addressable = mode_in->hdisplay;
5900 		timing_out->h_total = mode_in->htotal;
5901 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5902 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5903 		timing_out->v_total = mode_in->vtotal;
5904 		timing_out->v_addressable = mode_in->vdisplay;
5905 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5906 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5907 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5908 	} else {
5909 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5910 		timing_out->h_total = mode_in->crtc_htotal;
5911 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5912 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5913 		timing_out->v_total = mode_in->crtc_vtotal;
5914 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5915 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5916 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5917 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5918 	}
5919 
5920 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5921 
5922 	stream->output_color_space = get_output_color_space(timing_out);
5923 
5924 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5925 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5926 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5927 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5928 		    drm_mode_is_420_also(info, mode_in) &&
5929 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5930 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5931 			adjust_colour_depth_from_display_info(timing_out, info);
5932 		}
5933 	}
5934 }
5935 
5936 static void fill_audio_info(struct audio_info *audio_info,
5937 			    const struct drm_connector *drm_connector,
5938 			    const struct dc_sink *dc_sink)
5939 {
5940 	int i = 0;
5941 	int cea_revision = 0;
5942 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5943 
5944 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5945 	audio_info->product_id = edid_caps->product_id;
5946 
5947 	cea_revision = drm_connector->display_info.cea_rev;
5948 
5949 	strscpy(audio_info->display_name,
5950 		edid_caps->display_name,
5951 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5952 
5953 	if (cea_revision >= 3) {
5954 		audio_info->mode_count = edid_caps->audio_mode_count;
5955 
5956 		for (i = 0; i < audio_info->mode_count; ++i) {
5957 			audio_info->modes[i].format_code =
5958 					(enum audio_format_code)
5959 					(edid_caps->audio_modes[i].format_code);
5960 			audio_info->modes[i].channel_count =
5961 					edid_caps->audio_modes[i].channel_count;
5962 			audio_info->modes[i].sample_rates.all =
5963 					edid_caps->audio_modes[i].sample_rate;
5964 			audio_info->modes[i].sample_size =
5965 					edid_caps->audio_modes[i].sample_size;
5966 		}
5967 	}
5968 
5969 	audio_info->flags.all = edid_caps->speaker_flags;
5970 
5971 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5972 	if (drm_connector->latency_present[0]) {
5973 		audio_info->video_latency = drm_connector->video_latency[0];
5974 		audio_info->audio_latency = drm_connector->audio_latency[0];
5975 	}
5976 
5977 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5978 
5979 }
5980 
5981 static void
5982 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5983 				      struct drm_display_mode *dst_mode)
5984 {
5985 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5986 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5987 	dst_mode->crtc_clock = src_mode->crtc_clock;
5988 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5989 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5990 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5991 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5992 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5993 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5994 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5995 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5996 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5997 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5998 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5999 }
6000 
6001 static void
6002 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6003 					const struct drm_display_mode *native_mode,
6004 					bool scale_enabled)
6005 {
6006 	if (scale_enabled) {
6007 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6008 	} else if (native_mode->clock == drm_mode->clock &&
6009 			native_mode->htotal == drm_mode->htotal &&
6010 			native_mode->vtotal == drm_mode->vtotal) {
6011 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6012 	} else {
6013 		/* no scaling nor amdgpu inserted, no need to patch */
6014 	}
6015 }
6016 
6017 static struct dc_sink *
6018 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6019 {
6020 	struct dc_sink_init_data sink_init_data = { 0 };
6021 	struct dc_sink *sink = NULL;
6022 	sink_init_data.link = aconnector->dc_link;
6023 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6024 
6025 	sink = dc_sink_create(&sink_init_data);
6026 	if (!sink) {
6027 		DRM_ERROR("Failed to create sink!\n");
6028 		return NULL;
6029 	}
6030 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6031 
6032 	return sink;
6033 }
6034 
6035 static void set_multisync_trigger_params(
6036 		struct dc_stream_state *stream)
6037 {
6038 	struct dc_stream_state *master = NULL;
6039 
6040 	if (stream->triggered_crtc_reset.enabled) {
6041 		master = stream->triggered_crtc_reset.event_source;
6042 		stream->triggered_crtc_reset.event =
6043 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6044 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6045 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6046 	}
6047 }
6048 
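/*
 * For multi-stream synchronization, pick the stream with the highest
 * refresh rate (pixel clock divided by h_total * v_total) as the master and
 * point every other stream's CRTC-reset trigger at it.
 */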
6049 static void set_master_stream(struct dc_stream_state *stream_set[],
6050 			      int stream_count)
6051 {
6052 	int j, highest_rfr = 0, master_stream = 0;
6053 
6054 	for (j = 0;  j < stream_count; j++) {
6055 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6056 			int refresh_rate = 0;
6057 
6058 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6059 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6060 			if (refresh_rate > highest_rfr) {
6061 				highest_rfr = refresh_rate;
6062 				master_stream = j;
6063 			}
6064 		}
6065 	}
6066 	for (j = 0;  j < stream_count; j++) {
6067 		if (stream_set[j])
6068 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6069 	}
6070 }
6071 
6072 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6073 {
6074 	int i = 0;
6075 	struct dc_stream_state *stream;
6076 
6077 	if (context->stream_count < 2)
6078 		return;
6079 	for (i = 0; i < context->stream_count ; i++) {
6080 		if (!context->streams[i])
6081 			continue;
6082 		/*
6083 		 * TODO: add a function to read AMD VSDB bits and set
6084 		 * crtc_sync_master.multi_sync_enabled flag
6085 		 * For now it's set to false
6086 		 */
6087 	}
6088 
6089 	set_master_stream(context->streams, context->stream_count);
6090 
6091 	for (i = 0; i < context->stream_count ; i++) {
6092 		stream = context->streams[i];
6093 
6094 		if (!stream)
6095 			continue;
6096 
6097 		set_multisync_trigger_params(stream);
6098 	}
6099 }
6100 
6101 #if defined(CONFIG_DRM_AMD_DC_DCN)
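/*
 * Read the sink's DSC decoder capabilities from DPCD. Only DP and eDP sinks
 * are considered, and only when there is either no dongle in the path or a
 * DP-to-HDMI converter.
 */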
6102 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6103 							struct dc_sink *sink, struct dc_stream_state *stream,
6104 							struct dsc_dec_dpcd_caps *dsc_caps)
6105 {
6106 	stream->timing.flags.DSC = 0;
6107 	dsc_caps->is_dsc_supported = false;
6108 
6109 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6110 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6111 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6112 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6113 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6114 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6115 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6116 				dsc_caps);
6117 	}
6118 }
6119 
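/*
 * eDP DSC policy. Bits-per-pixel values are in 1/16 bpp units (8 * 16 is
 * 8.0 bpp): if a configuration around 8 bpp already fits within the
 * verified link bandwidth it is used as-is, otherwise the DSC config is
 * computed against the full link bandwidth.
 */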
6120 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6121 				    struct dc_sink *sink, struct dc_stream_state *stream,
6122 				    struct dsc_dec_dpcd_caps *dsc_caps,
6123 				    uint32_t max_dsc_target_bpp_limit_override)
6124 {
6125 	const struct dc_link_settings *verified_link_cap = NULL;
6126 	uint32_t link_bw_in_kbps;
6127 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6128 	struct dc *dc = sink->ctx->dc;
6129 	struct dc_dsc_bw_range bw_range = {0};
6130 	struct dc_dsc_config dsc_cfg = {0};
6131 
6132 	verified_link_cap = dc_link_get_link_cap(stream->link);
6133 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6134 	edp_min_bpp_x16 = 8 * 16;
6135 	edp_max_bpp_x16 = 8 * 16;
6136 
6137 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6138 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6139 
6140 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6141 		edp_min_bpp_x16 = edp_max_bpp_x16;
6142 
6143 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6144 				dc->debug.dsc_min_slice_height_override,
6145 				edp_min_bpp_x16, edp_max_bpp_x16,
6146 				dsc_caps,
6147 				&stream->timing,
6148 				&bw_range)) {
6149 
6150 		if (bw_range.max_kbps < link_bw_in_kbps) {
6151 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6152 					dsc_caps,
6153 					dc->debug.dsc_min_slice_height_override,
6154 					max_dsc_target_bpp_limit_override,
6155 					0,
6156 					&stream->timing,
6157 					&dsc_cfg)) {
6158 				stream->timing.dsc_cfg = dsc_cfg;
6159 				stream->timing.flags.DSC = 1;
6160 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6161 			}
6162 			return;
6163 		}
6164 	}
6165 
6166 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6167 				dsc_caps,
6168 				dc->debug.dsc_min_slice_height_override,
6169 				max_dsc_target_bpp_limit_override,
6170 				link_bw_in_kbps,
6171 				&stream->timing,
6172 				&dsc_cfg)) {
6173 		stream->timing.dsc_cfg = dsc_cfg;
6174 		stream->timing.flags.DSC = 1;
6175 	}
6176 }
6177 
6178 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6179 										struct dc_sink *sink, struct dc_stream_state *stream,
6180 										struct dsc_dec_dpcd_caps *dsc_caps)
6181 {
6182 	struct drm_connector *drm_connector = &aconnector->base;
6183 	uint32_t link_bandwidth_kbps;
6184 	uint32_t max_dsc_target_bpp_limit_override = 0;
6185 	struct dc *dc = sink->ctx->dc;
6186 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6187 	uint32_t dsc_max_supported_bw_in_kbps;
6188 
6189 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6190 							dc_link_get_link_cap(aconnector->dc_link));
6191 
6192 	if (stream->link && stream->link->local_sink)
6193 		max_dsc_target_bpp_limit_override =
6194 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6195 
6196 	/* Set DSC policy according to dsc_clock_en */
6197 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6198 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6199 
6200 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6201 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6202 
6203 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6204 
6205 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6206 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6207 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6208 						dsc_caps,
6209 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6210 						max_dsc_target_bpp_limit_override,
6211 						link_bandwidth_kbps,
6212 						&stream->timing,
6213 						&stream->timing.dsc_cfg)) {
6214 				stream->timing.flags.DSC = 1;
6215 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6216 								 __func__, drm_connector->name);
6217 			}
6218 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6219 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6220 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6221 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6222 
6223 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6224 					max_supported_bw_in_kbps > 0 &&
6225 					dsc_max_supported_bw_in_kbps > 0)
6226 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6227 						dsc_caps,
6228 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6229 						max_dsc_target_bpp_limit_override,
6230 						dsc_max_supported_bw_in_kbps,
6231 						&stream->timing,
6232 						&stream->timing.dsc_cfg)) {
6233 					stream->timing.flags.DSC = 1;
6234 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6235 									 __func__, drm_connector->name);
6236 				}
6237 		}
6238 	}
6239 
6240 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6241 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6242 		stream->timing.flags.DSC = 1;
6243 
6244 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6245 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6246 
6247 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6248 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6249 
6250 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6251 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6252 }
6253 #endif /* CONFIG_DRM_AMD_DC_DCN */
6254 
6255 /**
6256  * DOC: FreeSync Video
6257  *
6258  * When a userspace application wants to play a video, the content follows a
6259  * standard format definition that usually specifies the FPS for that format.
6260  * The below list illustrates some video format and the expected FPS,
6261  * respectively:
6262  *
6263  * - TV/NTSC (23.976 FPS)
6264  * - Cinema (24 FPS)
6265  * - TV/PAL (25 FPS)
6266  * - TV/NTSC (29.97 FPS)
6267  * - TV/NTSC (30 FPS)
6268  * - Cinema HFR (48 FPS)
6269  * - TV/PAL (50 FPS)
6270  * - Commonly used (60 FPS)
6271  * - Multiples of 24 (48,72,96,120 FPS)
6272  *
6273  * The list of standard video formats is not huge and can be added to the
6274  * connector modeset list beforehand. With that, userspace can leverage
6275  * FreeSync to extend the front porch in order to attain the target refresh
6276  * rate. Such a switch will happen seamlessly, without screen blanking or
6277  * reprogramming of the output in any other way. If the userspace requests a
6278  * modesetting change compatible with FreeSync modes that only differ in the
6279  * refresh rate, DC will skip the full update and avoid blink during the
6280  * transition. For example, the video player can change the modesetting from
6281  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6282  * causing any display blink. This same concept can be applied to a mode
6283  * setting change.
6284  */
6285 static struct drm_display_mode *
6286 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6287 			  bool use_probed_modes)
6288 {
6289 	struct drm_display_mode *m, *m_pref = NULL;
6290 	u16 current_refresh, highest_refresh;
6291 	struct list_head *list_head = use_probed_modes ?
6292 						    &aconnector->base.probed_modes :
6293 						    &aconnector->base.modes;
6294 
6295 	if (aconnector->freesync_vid_base.clock != 0)
6296 		return &aconnector->freesync_vid_base;
6297 
6298 	/* Find the preferred mode */
6299 	list_for_each_entry (m, list_head, head) {
6300 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6301 			m_pref = m;
6302 			break;
6303 		}
6304 	}
6305 
6306 	if (!m_pref) {
6307 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6308 		m_pref = list_first_entry_or_null(
6309 			&aconnector->base.modes, struct drm_display_mode, head);
6310 		if (!m_pref) {
6311 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6312 			return NULL;
6313 		}
6314 	}
6315 
6316 	highest_refresh = drm_mode_vrefresh(m_pref);
6317 
6318 	/*
6319 	 * Find the mode with highest refresh rate with same resolution.
6320 	 * For some monitors, preferred mode is not the mode with highest
6321 	 * supported refresh rate.
6322 	 */
6323 	list_for_each_entry (m, list_head, head) {
6324 		current_refresh  = drm_mode_vrefresh(m);
6325 
6326 		if (m->hdisplay == m_pref->hdisplay &&
6327 		    m->vdisplay == m_pref->vdisplay &&
6328 		    highest_refresh < current_refresh) {
6329 			highest_refresh = current_refresh;
6330 			m_pref = m;
6331 		}
6332 	}
6333 
6334 	aconnector->freesync_vid_base = *m_pref;
6335 	return m_pref;
6336 }
6337 
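/*
 * A mode qualifies as a FreeSync video mode when it matches the stored base
 * mode in pixel clock and every horizontal timing parameter, and its
 * vertical sync position is shifted by exactly the same amount as its
 * vtotal, i.e. only the vertical front porch differs.
 */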
6338 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6339 				   struct amdgpu_dm_connector *aconnector)
6340 {
6341 	struct drm_display_mode *high_mode;
6342 	int timing_diff;
6343 
6344 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6345 	if (!high_mode || !mode)
6346 		return false;
6347 
6348 	timing_diff = high_mode->vtotal - mode->vtotal;
6349 
6350 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6351 	    high_mode->hdisplay != mode->hdisplay ||
6352 	    high_mode->vdisplay != mode->vdisplay ||
6353 	    high_mode->hsync_start != mode->hsync_start ||
6354 	    high_mode->hsync_end != mode->hsync_end ||
6355 	    high_mode->htotal != mode->htotal ||
6356 	    high_mode->hskew != mode->hskew ||
6357 	    high_mode->vscan != mode->vscan ||
6358 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6359 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6360 		return false;
6361 	else
6362 		return true;
6363 }
6364 
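/*
 * Create and fill a dc_stream_state for a connector: pick the FreeSync or
 * preferred mode to derive the timing from, apply the SST DSC policy,
 * scaling and audio info, and, when PSR is enabled, decide whether VSC SDP
 * colorimetry can be used.
 */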
6365 struct dc_stream_state *
6366 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6367 		       const struct drm_display_mode *drm_mode,
6368 		       const struct dm_connector_state *dm_state,
6369 		       const struct dc_stream_state *old_stream,
6370 		       int requested_bpc)
6371 {
6372 	struct drm_display_mode *preferred_mode = NULL;
6373 	struct drm_connector *drm_connector;
6374 	const struct drm_connector_state *con_state =
6375 		dm_state ? &dm_state->base : NULL;
6376 	struct dc_stream_state *stream = NULL;
6377 	struct drm_display_mode mode = *drm_mode;
6378 	struct drm_display_mode saved_mode;
6379 	struct drm_display_mode *freesync_mode = NULL;
6380 	bool native_mode_found = false;
6381 	bool recalculate_timing = false;
6382 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6383 	int mode_refresh;
6384 	int preferred_refresh = 0;
6385 #if defined(CONFIG_DRM_AMD_DC_DCN)
6386 	struct dsc_dec_dpcd_caps dsc_caps;
6387 #endif
6388 	struct dc_sink *sink = NULL;
6389 
6390 	memset(&saved_mode, 0, sizeof(saved_mode));
6391 
6392 	if (aconnector == NULL) {
6393 		DRM_ERROR("aconnector is NULL!\n");
6394 		return stream;
6395 	}
6396 
6397 	drm_connector = &aconnector->base;
6398 
6399 	if (!aconnector->dc_sink) {
6400 		sink = create_fake_sink(aconnector);
6401 		if (!sink)
6402 			return stream;
6403 	} else {
6404 		sink = aconnector->dc_sink;
6405 		dc_sink_retain(sink);
6406 	}
6407 
6408 	stream = dc_create_stream_for_sink(sink);
6409 
6410 	if (stream == NULL) {
6411 		DRM_ERROR("Failed to create stream for sink!\n");
6412 		goto finish;
6413 	}
6414 
6415 	stream->dm_stream_context = aconnector;
6416 
6417 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6418 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6419 
6420 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6421 		/* Search for preferred mode */
6422 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6423 			native_mode_found = true;
6424 			break;
6425 		}
6426 	}
6427 	if (!native_mode_found)
6428 		preferred_mode = list_first_entry_or_null(
6429 				&aconnector->base.modes,
6430 				struct drm_display_mode,
6431 				head);
6432 
6433 	mode_refresh = drm_mode_vrefresh(&mode);
6434 
6435 	if (preferred_mode == NULL) {
6436 		/*
6437 		 * This may not be an error: the use case is when there are no
6438 		 * usermode calls to reset and set the mode upon hotplug. In that
6439 		 * case we call set mode ourselves to restore the previous mode,
6440 		 * and the mode list may not yet be filled in.
6441 		 */
6442 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6443 	} else {
6444 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6445 		if (recalculate_timing) {
6446 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6447 			saved_mode = mode;
6448 			mode = *freesync_mode;
6449 		} else {
6450 			decide_crtc_timing_for_drm_display_mode(
6451 				&mode, preferred_mode, scale);
6452 
6453 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6454 		}
6455 	}
6456 
6457 	if (recalculate_timing)
6458 		drm_mode_set_crtcinfo(&saved_mode, 0);
6459 	else if (!dm_state)
6460 		drm_mode_set_crtcinfo(&mode, 0);
6461 
6462 	/*
6463 	 * If scaling is enabled and refresh rate didn't change
6464 	 * we copy the vic and polarities of the old timings
6465 	 */
6466 	if (!scale || mode_refresh != preferred_refresh)
6467 		fill_stream_properties_from_drm_display_mode(
6468 			stream, &mode, &aconnector->base, con_state, NULL,
6469 			requested_bpc);
6470 	else
6471 		fill_stream_properties_from_drm_display_mode(
6472 			stream, &mode, &aconnector->base, con_state, old_stream,
6473 			requested_bpc);
6474 
6475 #if defined(CONFIG_DRM_AMD_DC_DCN)
6476 	/* SST DSC determination policy */
6477 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6478 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6479 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6480 #endif
6481 
6482 	update_stream_scaling_settings(&mode, dm_state, stream);
6483 
6484 	fill_audio_info(
6485 		&stream->audio_info,
6486 		drm_connector,
6487 		sink);
6488 
6489 	update_stream_signal(stream, sink);
6490 
6491 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6492 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6493 
6494 	if (stream->link->psr_settings.psr_feature_enabled) {
6495 		/*
6496 		 * Decide whether the stream supports VSC SDP colorimetry
6497 		 * before building the VSC infopacket.
6498 		 */
6499 		stream->use_vsc_sdp_for_colorimetry = false;
6500 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6501 			stream->use_vsc_sdp_for_colorimetry =
6502 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6503 		} else {
6504 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6505 				stream->use_vsc_sdp_for_colorimetry = true;
6506 		}
6507 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6508 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6509 
6510 	}
6511 finish:
6512 	dc_sink_release(sink);
6513 
6514 	return stream;
6515 }
6516 
6517 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6518 {
6519 	drm_crtc_cleanup(crtc);
6520 	kfree(crtc);
6521 }
6522 
6523 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6524 				  struct drm_crtc_state *state)
6525 {
6526 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6527 
6528 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6529 	if (cur->stream)
6530 		dc_stream_release(cur->stream);
6531 
6532 
6533 	__drm_atomic_helper_crtc_destroy_state(state);
6534 
6535 
6536 	kfree(state);
6537 }
6538 
6539 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6540 {
6541 	struct dm_crtc_state *state;
6542 
6543 	if (crtc->state)
6544 		dm_crtc_destroy_state(crtc, crtc->state);
6545 
6546 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6547 	if (WARN_ON(!state))
6548 		return;
6549 
6550 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6551 }
6552 
6553 static struct drm_crtc_state *
6554 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6555 {
6556 	struct dm_crtc_state *state, *cur;
6557 
6558 	if (WARN_ON(!crtc->state))
6559 		return NULL;
6560 
6561 	cur = to_dm_crtc_state(crtc->state);
6562 
6563 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6564 	if (!state)
6565 		return NULL;
6566 
6567 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6568 
6569 	if (cur->stream) {
6570 		state->stream = cur->stream;
6571 		dc_stream_retain(state->stream);
6572 	}
6573 
6574 	state->active_planes = cur->active_planes;
6575 	state->vrr_infopacket = cur->vrr_infopacket;
6576 	state->abm_level = cur->abm_level;
6577 	state->vrr_supported = cur->vrr_supported;
6578 	state->freesync_config = cur->freesync_config;
6579 	state->cm_has_degamma = cur->cm_has_degamma;
6580 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6581 	state->force_dpms_off = cur->force_dpms_off;
6582 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6583 
6584 	return &state->base;
6585 }
6586 
6587 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6588 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6589 {
6590 	crtc_debugfs_init(crtc);
6591 
6592 	return 0;
6593 }
6594 #endif
6595 
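/*
 * The VUPDATE interrupt is only needed while variable refresh rate is
 * active, so dm_set_vblank() enables or disables it together with the
 * VBLANK interrupt and then hands any remaining per-CRTC work off to the
 * vblank control workqueue.
 */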
6596 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6597 {
6598 	enum dc_irq_source irq_source;
6599 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6600 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6601 	int rc;
6602 
6603 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6604 
6605 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6606 
6607 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6608 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6609 	return rc;
6610 }
6611 
6612 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6613 {
6614 	enum dc_irq_source irq_source;
6615 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6616 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6617 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6618 #if defined(CONFIG_DRM_AMD_DC_DCN)
6619 	struct amdgpu_display_manager *dm = &adev->dm;
6620 	struct vblank_control_work *work;
6621 #endif
6622 	int rc = 0;
6623 
6624 	if (enable) {
6625 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6626 		if (amdgpu_dm_vrr_active(acrtc_state))
6627 			rc = dm_set_vupdate_irq(crtc, true);
6628 	} else {
6629 		/* vblank irq off -> vupdate irq off */
6630 		rc = dm_set_vupdate_irq(crtc, false);
6631 	}
6632 
6633 	if (rc)
6634 		return rc;
6635 
6636 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6637 
6638 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6639 		return -EBUSY;
6640 
6641 	if (amdgpu_in_reset(adev))
6642 		return 0;
6643 
6644 #if defined(CONFIG_DRM_AMD_DC_DCN)
6645 	if (dm->vblank_control_workqueue) {
6646 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6647 		if (!work)
6648 			return -ENOMEM;
6649 
6650 		INIT_WORK(&work->work, vblank_control_worker);
6651 		work->dm = dm;
6652 		work->acrtc = acrtc;
6653 		work->enable = enable;
6654 
6655 		if (acrtc_state->stream) {
6656 			dc_stream_retain(acrtc_state->stream);
6657 			work->stream = acrtc_state->stream;
6658 		}
6659 
6660 		queue_work(dm->vblank_control_workqueue, &work->work);
6661 	}
6662 #endif
6663 
6664 	return 0;
6665 }
6666 
6667 static int dm_enable_vblank(struct drm_crtc *crtc)
6668 {
6669 	return dm_set_vblank(crtc, true);
6670 }
6671 
6672 static void dm_disable_vblank(struct drm_crtc *crtc)
6673 {
6674 	dm_set_vblank(crtc, false);
6675 }
6676 
6677 /* Implemented only the options currently available for the driver */
6678 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6679 	.reset = dm_crtc_reset_state,
6680 	.destroy = amdgpu_dm_crtc_destroy,
6681 	.set_config = drm_atomic_helper_set_config,
6682 	.page_flip = drm_atomic_helper_page_flip,
6683 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6684 	.atomic_destroy_state = dm_crtc_destroy_state,
6685 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6686 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6687 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6688 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6689 	.enable_vblank = dm_enable_vblank,
6690 	.disable_vblank = dm_disable_vblank,
6691 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6692 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6693 	.late_register = amdgpu_dm_crtc_late_register,
6694 #endif
6695 };
6696 
6697 static enum drm_connector_status
6698 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6699 {
6700 	bool connected;
6701 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6702 
6703 	/*
6704 	 * Notes:
6705 	 * 1. This interface is NOT called in context of HPD irq.
6706 	 * 2. This interface *is called* in context of user-mode ioctl, which
6707 	 * makes it a bad place for *any* MST-related activity.
6708 	 */
6709 
6710 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6711 	    !aconnector->fake_enable)
6712 		connected = (aconnector->dc_sink != NULL);
6713 	else
6714 		connected = (aconnector->base.force == DRM_FORCE_ON);
6715 
6716 	update_subconnector_property(aconnector);
6717 
6718 	return (connected ? connector_status_connected :
6719 			connector_status_disconnected);
6720 }
6721 
6722 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6723 					    struct drm_connector_state *connector_state,
6724 					    struct drm_property *property,
6725 					    uint64_t val)
6726 {
6727 	struct drm_device *dev = connector->dev;
6728 	struct amdgpu_device *adev = drm_to_adev(dev);
6729 	struct dm_connector_state *dm_old_state =
6730 		to_dm_connector_state(connector->state);
6731 	struct dm_connector_state *dm_new_state =
6732 		to_dm_connector_state(connector_state);
6733 
6734 	int ret = -EINVAL;
6735 
6736 	if (property == dev->mode_config.scaling_mode_property) {
6737 		enum amdgpu_rmx_type rmx_type;
6738 
6739 		switch (val) {
6740 		case DRM_MODE_SCALE_CENTER:
6741 			rmx_type = RMX_CENTER;
6742 			break;
6743 		case DRM_MODE_SCALE_ASPECT:
6744 			rmx_type = RMX_ASPECT;
6745 			break;
6746 		case DRM_MODE_SCALE_FULLSCREEN:
6747 			rmx_type = RMX_FULL;
6748 			break;
6749 		case DRM_MODE_SCALE_NONE:
6750 		default:
6751 			rmx_type = RMX_OFF;
6752 			break;
6753 		}
6754 
6755 		if (dm_old_state->scaling == rmx_type)
6756 			return 0;
6757 
6758 		dm_new_state->scaling = rmx_type;
6759 		ret = 0;
6760 	} else if (property == adev->mode_info.underscan_hborder_property) {
6761 		dm_new_state->underscan_hborder = val;
6762 		ret = 0;
6763 	} else if (property == adev->mode_info.underscan_vborder_property) {
6764 		dm_new_state->underscan_vborder = val;
6765 		ret = 0;
6766 	} else if (property == adev->mode_info.underscan_property) {
6767 		dm_new_state->underscan_enable = val;
6768 		ret = 0;
6769 	} else if (property == adev->mode_info.abm_level_property) {
6770 		dm_new_state->abm_level = val;
6771 		ret = 0;
6772 	}
6773 
6774 	return ret;
6775 }
6776 
6777 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6778 					    const struct drm_connector_state *state,
6779 					    struct drm_property *property,
6780 					    uint64_t *val)
6781 {
6782 	struct drm_device *dev = connector->dev;
6783 	struct amdgpu_device *adev = drm_to_adev(dev);
6784 	struct dm_connector_state *dm_state =
6785 		to_dm_connector_state(state);
6786 	int ret = -EINVAL;
6787 
6788 	if (property == dev->mode_config.scaling_mode_property) {
6789 		switch (dm_state->scaling) {
6790 		case RMX_CENTER:
6791 			*val = DRM_MODE_SCALE_CENTER;
6792 			break;
6793 		case RMX_ASPECT:
6794 			*val = DRM_MODE_SCALE_ASPECT;
6795 			break;
6796 		case RMX_FULL:
6797 			*val = DRM_MODE_SCALE_FULLSCREEN;
6798 			break;
6799 		case RMX_OFF:
6800 		default:
6801 			*val = DRM_MODE_SCALE_NONE;
6802 			break;
6803 		}
6804 		ret = 0;
6805 	} else if (property == adev->mode_info.underscan_hborder_property) {
6806 		*val = dm_state->underscan_hborder;
6807 		ret = 0;
6808 	} else if (property == adev->mode_info.underscan_vborder_property) {
6809 		*val = dm_state->underscan_vborder;
6810 		ret = 0;
6811 	} else if (property == adev->mode_info.underscan_property) {
6812 		*val = dm_state->underscan_enable;
6813 		ret = 0;
6814 	} else if (property == adev->mode_info.abm_level_property) {
6815 		*val = dm_state->abm_level;
6816 		ret = 0;
6817 	}
6818 
6819 	return ret;
6820 }
6821 
6822 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6823 {
6824 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6825 
6826 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6827 }
6828 
6829 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6830 {
6831 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6832 	const struct dc_link *link = aconnector->dc_link;
6833 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6834 	struct amdgpu_display_manager *dm = &adev->dm;
6835 	int i;
6836 
6837 	/*
6838 	 * Call only if mst_mgr was initialized before, since it's not done
6839 	 * for all connector types.
6840 	 */
6841 	if (aconnector->mst_mgr.dev)
6842 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6843 
6844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6845 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6846 	for (i = 0; i < dm->num_of_edps; i++) {
6847 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6848 			backlight_device_unregister(dm->backlight_dev[i]);
6849 			dm->backlight_dev[i] = NULL;
6850 		}
6851 	}
6852 #endif
6853 
6854 	if (aconnector->dc_em_sink)
6855 		dc_sink_release(aconnector->dc_em_sink);
6856 	aconnector->dc_em_sink = NULL;
6857 	if (aconnector->dc_sink)
6858 		dc_sink_release(aconnector->dc_sink);
6859 	aconnector->dc_sink = NULL;
6860 
6861 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6862 	drm_connector_unregister(connector);
6863 	drm_connector_cleanup(connector);
6864 	if (aconnector->i2c) {
6865 		i2c_del_adapter(&aconnector->i2c->base);
6866 		kfree(aconnector->i2c);
6867 	}
6868 	kfree(aconnector->dm_dp_aux.aux.name);
6869 
6870 	kfree(connector);
6871 }
6872 
6873 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6874 {
6875 	struct dm_connector_state *state =
6876 		to_dm_connector_state(connector->state);
6877 
6878 	if (connector->state)
6879 		__drm_atomic_helper_connector_destroy_state(connector->state);
6880 
6881 	kfree(state);
6882 
6883 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6884 
6885 	if (state) {
6886 		state->scaling = RMX_OFF;
6887 		state->underscan_enable = false;
6888 		state->underscan_hborder = 0;
6889 		state->underscan_vborder = 0;
6890 		state->base.max_requested_bpc = 8;
6891 		state->vcpi_slots = 0;
6892 		state->pbn = 0;
6893 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6894 			state->abm_level = amdgpu_dm_abm_level;
6895 
6896 		__drm_atomic_helper_connector_reset(connector, &state->base);
6897 	}
6898 }
6899 
6900 struct drm_connector_state *
6901 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6902 {
6903 	struct dm_connector_state *state =
6904 		to_dm_connector_state(connector->state);
6905 
6906 	struct dm_connector_state *new_state =
6907 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6908 
6909 	if (!new_state)
6910 		return NULL;
6911 
6912 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6913 
6914 	new_state->freesync_capable = state->freesync_capable;
6915 	new_state->abm_level = state->abm_level;
6916 	new_state->scaling = state->scaling;
6917 	new_state->underscan_enable = state->underscan_enable;
6918 	new_state->underscan_hborder = state->underscan_hborder;
6919 	new_state->underscan_vborder = state->underscan_vborder;
6920 	new_state->vcpi_slots = state->vcpi_slots;
6921 	new_state->pbn = state->pbn;
6922 	return &new_state->base;
6923 }
6924 
6925 static int
6926 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6927 {
6928 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6929 		to_amdgpu_dm_connector(connector);
6930 	int r;
6931 
6932 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6933 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6934 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6935 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6936 		if (r)
6937 			return r;
6938 	}
6939 
6940 #if defined(CONFIG_DEBUG_FS)
6941 	connector_debugfs_init(amdgpu_dm_connector);
6942 #endif
6943 
6944 	return 0;
6945 }
6946 
6947 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6948 	.reset = amdgpu_dm_connector_funcs_reset,
6949 	.detect = amdgpu_dm_connector_detect,
6950 	.fill_modes = drm_helper_probe_single_connector_modes,
6951 	.destroy = amdgpu_dm_connector_destroy,
6952 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6953 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6954 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6955 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6956 	.late_register = amdgpu_dm_connector_late_register,
6957 	.early_unregister = amdgpu_dm_connector_unregister
6958 };
6959 
6960 static int get_modes(struct drm_connector *connector)
6961 {
6962 	return amdgpu_dm_connector_get_modes(connector);
6963 }
6964 
6965 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6966 {
6967 	struct dc_sink_init_data init_params = {
6968 			.link = aconnector->dc_link,
6969 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6970 	};
6971 	struct edid *edid;
6972 
6973 	if (!aconnector->base.edid_blob_ptr) {
6974 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6975 				aconnector->base.name);
6976 
6977 		aconnector->base.force = DRM_FORCE_OFF;
6978 		aconnector->base.override_edid = false;
6979 		return;
6980 	}
6981 
6982 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6983 
6984 	aconnector->edid = edid;
6985 
6986 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6987 		aconnector->dc_link,
6988 		(uint8_t *)edid,
6989 		(edid->extensions + 1) * EDID_LENGTH,
6990 		&init_params);
6991 
6992 	if (aconnector->base.force == DRM_FORCE_ON) {
6993 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6994 		aconnector->dc_link->local_sink :
6995 		aconnector->dc_em_sink;
6996 		dc_sink_retain(aconnector->dc_sink);
6997 	}
6998 }
6999 
7000 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7001 {
7002 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7003 
7004 	/*
7005 	 * In case of headless boot with force on for a DP managed connector,
7006 	 * those settings have to be != 0 to get an initial modeset.
7007 	 */
7008 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7009 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7010 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7011 	}
7012 
7014 	aconnector->base.override_edid = true;
7015 	create_eml_sink(aconnector);
7016 }
7017 
7018 struct dc_stream_state *
7019 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7020 				const struct drm_display_mode *drm_mode,
7021 				const struct dm_connector_state *dm_state,
7022 				const struct dc_stream_state *old_stream)
7023 {
7024 	struct drm_connector *connector = &aconnector->base;
7025 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7026 	struct dc_stream_state *stream;
7027 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7028 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7029 	enum dc_status dc_result = DC_OK;
7030 
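	/*
	 * Build the stream and have DC validate it; on failure, drop the
	 * requested bpc by 2 and retry until validation passes or we fall
	 * below 6 bpc.
	 */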
7031 	do {
7032 		stream = create_stream_for_sink(aconnector, drm_mode,
7033 						dm_state, old_stream,
7034 						requested_bpc);
7035 		if (stream == NULL) {
7036 			DRM_ERROR("Failed to create stream for sink!\n");
7037 			break;
7038 		}
7039 
7040 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7041 
7042 		if (dc_result != DC_OK) {
7043 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7044 				      drm_mode->hdisplay,
7045 				      drm_mode->vdisplay,
7046 				      drm_mode->clock,
7047 				      dc_result,
7048 				      dc_status_to_str(dc_result));
7049 
7050 			dc_stream_release(stream);
7051 			stream = NULL;
7052 			requested_bpc -= 2; /* lower bpc to retry validation */
7053 		}
7054 
7055 	} while (stream == NULL && requested_bpc >= 6);
7056 
7057 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7058 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7059 
7060 		aconnector->force_yuv420_output = true;
7061 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7062 						dm_state, old_stream);
7063 		aconnector->force_yuv420_output = false;
7064 	}
7065 
7066 	return stream;
7067 }
7068 
7069 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7070 				   struct drm_display_mode *mode)
7071 {
7072 	int result = MODE_ERROR;
7073 	struct dc_sink *dc_sink;
7074 	/* TODO: Unhardcode stream count */
7075 	struct dc_stream_state *stream;
7076 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7077 
7078 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7079 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7080 		return result;
7081 
7082 	/*
7083 	 * Only run this the first time mode_valid is called, to initialize
7084 	 * EDID management.
7085 	 */
7086 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7087 		!aconnector->dc_em_sink)
7088 		handle_edid_mgmt(aconnector);
7089 
7090 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7091 
7092 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7093 				aconnector->base.force != DRM_FORCE_ON) {
7094 		DRM_ERROR("dc_sink is NULL!\n");
7095 		goto fail;
7096 	}
7097 
7098 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7099 	if (stream) {
7100 		dc_stream_release(stream);
7101 		result = MODE_OK;
7102 	}
7103 
7104 fail:
7105 	/* TODO: error handling */
7106 	return result;
7107 }
7108 
7109 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7110 				struct dc_info_packet *out)
7111 {
7112 	struct hdmi_drm_infoframe frame;
7113 	unsigned char buf[30]; /* 26 + 4 */
7114 	ssize_t len;
7115 	int ret, i;
7116 
7117 	memset(out, 0, sizeof(*out));
7118 
7119 	if (!state->hdr_output_metadata)
7120 		return 0;
7121 
7122 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7123 	if (ret)
7124 		return ret;
7125 
7126 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7127 	if (len < 0)
7128 		return (int)len;
7129 
7130 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7131 	if (len != 30)
7132 		return -EINVAL;
7133 
7134 	/* Prepare the infopacket for DC. */
7135 	switch (state->connector->connector_type) {
7136 	case DRM_MODE_CONNECTOR_HDMIA:
7137 		out->hb0 = 0x87; /* type */
7138 		out->hb1 = 0x01; /* version */
7139 		out->hb2 = 0x1A; /* length */
7140 		out->sb[0] = buf[3]; /* checksum */
7141 		i = 1;
7142 		break;
7143 
7144 	case DRM_MODE_CONNECTOR_DisplayPort:
7145 	case DRM_MODE_CONNECTOR_eDP:
7146 		out->hb0 = 0x00; /* sdp id, zero */
7147 		out->hb1 = 0x87; /* type */
7148 		out->hb2 = 0x1D; /* payload len - 1 */
7149 		out->hb3 = (0x13 << 2); /* sdp version */
7150 		out->sb[0] = 0x01; /* version */
7151 		out->sb[1] = 0x1A; /* length */
7152 		i = 2;
7153 		break;
7154 
7155 	default:
7156 		return -EINVAL;
7157 	}
7158 
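	/* Copy the 26 bytes of static metadata, skipping the 4-byte infoframe header packed above. */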
7159 	memcpy(&out->sb[i], &buf[4], 26);
7160 	out->valid = true;
7161 
7162 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7163 		       sizeof(out->sb), false);
7164 
7165 	return 0;
7166 }
7167 
7168 static int
7169 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7170 				 struct drm_atomic_state *state)
7171 {
7172 	struct drm_connector_state *new_con_state =
7173 		drm_atomic_get_new_connector_state(state, conn);
7174 	struct drm_connector_state *old_con_state =
7175 		drm_atomic_get_old_connector_state(state, conn);
7176 	struct drm_crtc *crtc = new_con_state->crtc;
7177 	struct drm_crtc_state *new_crtc_state;
7178 	int ret;
7179 
7180 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7181 
7182 	if (!crtc)
7183 		return 0;
7184 
7185 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7186 		struct dc_info_packet hdr_infopacket;
7187 
7188 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7189 		if (ret)
7190 			return ret;
7191 
7192 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7193 		if (IS_ERR(new_crtc_state))
7194 			return PTR_ERR(new_crtc_state);
7195 
7196 		/*
7197 		 * DC considers the stream backends changed if the
7198 		 * static metadata changes. Forcing the modeset also
7199 		 * gives a simple way for userspace to switch from
7200 		 * 8bpc to 10bpc when setting the metadata to enter
7201 		 * or exit HDR.
7202 		 *
7203 		 * Changing the static metadata after it's been
7204 		 * set is permissible, however. So only force a
7205 		 * modeset if we're entering or exiting HDR.
7206 		 */
7207 		new_crtc_state->mode_changed =
7208 			!old_con_state->hdr_output_metadata ||
7209 			!new_con_state->hdr_output_metadata;
7210 	}
7211 
7212 	return 0;
7213 }
7214 
7215 static const struct drm_connector_helper_funcs
7216 amdgpu_dm_connector_helper_funcs = {
7217 	/*
7218 	 * If hotplugging a second, bigger display in FB console mode, higher
7219 	 * resolution modes will be filtered by drm_mode_validate_size(), and those
7220 	 * modes are missing after the user starts lightdm. So we need to renew the
7221 	 * modes list in the get_modes callback, not just return the modes count.
7222 	 */
7223 	.get_modes = get_modes,
7224 	.mode_valid = amdgpu_dm_connector_mode_valid,
7225 	.atomic_check = amdgpu_dm_connector_atomic_check,
7226 };
7227 
7228 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7229 {
7230 }
7231 
7232 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7233 {
7234 	struct drm_atomic_state *state = new_crtc_state->state;
7235 	struct drm_plane *plane;
7236 	int num_active = 0;
7237 
7238 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7239 		struct drm_plane_state *new_plane_state;
7240 
7241 		/* Cursor planes are "fake". */
7242 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7243 			continue;
7244 
7245 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7246 
7247 		if (!new_plane_state) {
7248 			/*
7249 			 * The plane is enabled on the CRTC and hasn't changed
7250 			 * state. This means that it previously passed
7251 			 * validation and is therefore enabled.
7252 			 */
7253 			num_active += 1;
7254 			continue;
7255 		}
7256 
7257 		/* We need a framebuffer to be considered enabled. */
7258 		num_active += (new_plane_state->fb != NULL);
7259 	}
7260 
7261 	return num_active;
7262 }
7263 
7264 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7265 					 struct drm_crtc_state *new_crtc_state)
7266 {
7267 	struct dm_crtc_state *dm_new_crtc_state =
7268 		to_dm_crtc_state(new_crtc_state);
7269 
7270 	dm_new_crtc_state->active_planes = 0;
7271 
7272 	if (!dm_new_crtc_state->stream)
7273 		return;
7274 
7275 	dm_new_crtc_state->active_planes =
7276 		count_crtc_active_planes(new_crtc_state);
7277 }
7278 
7279 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7280 				       struct drm_atomic_state *state)
7281 {
7282 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7283 									  crtc);
7284 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7285 	struct dc *dc = adev->dm.dc;
7286 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7287 	int ret = -EINVAL;
7288 
7289 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7290 
7291 	dm_update_crtc_active_planes(crtc, crtc_state);
7292 
7293 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7294 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7295 		return ret;
7296 	}
7297 
7298 	/*
7299 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7300 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7301 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7302 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7303 	 */
7304 	if (crtc_state->enable &&
7305 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7306 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7307 		return -EINVAL;
7308 	}
7309 
7310 	/* In some use cases, like reset, no stream is attached */
7311 	if (!dm_crtc_state->stream)
7312 		return 0;
7313 
7314 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7315 		return 0;
7316 
7317 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7318 	return ret;
7319 }
7320 
7321 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7322 				      const struct drm_display_mode *mode,
7323 				      struct drm_display_mode *adjusted_mode)
7324 {
7325 	return true;
7326 }
7327 
7328 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7329 	.disable = dm_crtc_helper_disable,
7330 	.atomic_check = dm_crtc_helper_atomic_check,
7331 	.mode_fixup = dm_crtc_helper_mode_fixup,
7332 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7333 };
7334 
7335 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7336 {
7337 
7338 }
7339 
7340 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7341 {
7342 	switch (display_color_depth) {
7343 	case COLOR_DEPTH_666:
7344 		return 6;
7345 	case COLOR_DEPTH_888:
7346 		return 8;
7347 	case COLOR_DEPTH_101010:
7348 		return 10;
7349 	case COLOR_DEPTH_121212:
7350 		return 12;
7351 	case COLOR_DEPTH_141414:
7352 		return 14;
7353 	case COLOR_DEPTH_161616:
7354 		return 16;
7355 	default:
7356 		break;
7357 	}
7358 	return 0;
7359 }
7360 
7361 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7362 					  struct drm_crtc_state *crtc_state,
7363 					  struct drm_connector_state *conn_state)
7364 {
7365 	struct drm_atomic_state *state = crtc_state->state;
7366 	struct drm_connector *connector = conn_state->connector;
7367 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7368 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7369 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7370 	struct drm_dp_mst_topology_mgr *mst_mgr;
7371 	struct drm_dp_mst_port *mst_port;
7372 	enum dc_color_depth color_depth;
7373 	int clock, bpp = 0;
7374 	bool is_y420 = false;
7375 
7376 	if (!aconnector->port || !aconnector->dc_sink)
7377 		return 0;
7378 
7379 	mst_port = aconnector->port;
7380 	mst_mgr = &aconnector->mst_port->mst_mgr;
7381 
7382 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7383 		return 0;
7384 
7385 	if (!state->duplicated) {
7386 		int max_bpc = conn_state->max_requested_bpc;
7387 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7388 				aconnector->force_yuv420_output;
7389 		color_depth = convert_color_depth_from_display_info(connector,
7390 								    is_y420,
7391 								    max_bpc);
7392 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7393 		clock = adjusted_mode->clock;
7394 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7395 	}
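	/* Convert the payload bandwidth number into MST time slots on the link. */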
7396 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7397 									   mst_mgr,
7398 									   mst_port,
7399 									   dm_new_connector_state->pbn,
7400 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7401 	if (dm_new_connector_state->vcpi_slots < 0) {
7402 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7403 		return dm_new_connector_state->vcpi_slots;
7404 	}
7405 	return 0;
7406 }
7407 
7408 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7409 	.disable = dm_encoder_helper_disable,
7410 	.atomic_check = dm_encoder_helper_atomic_check
7411 };
7412 
7413 #if defined(CONFIG_DRM_AMD_DC_DCN)
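/*
 * Apply the per-stream PBN values computed by the MST DSC fairness pass
 * (compute_mst_dsc_configs_for_state): translate each into VCPI time slots
 * and enable or disable DSC on the corresponding MST port.
 */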
7414 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7415 					    struct dc_state *dc_state,
7416 					    struct dsc_mst_fairness_vars *vars)
7417 {
7418 	struct dc_stream_state *stream = NULL;
7419 	struct drm_connector *connector;
7420 	struct drm_connector_state *new_con_state;
7421 	struct amdgpu_dm_connector *aconnector;
7422 	struct dm_connector_state *dm_conn_state;
7423 	int i, j;
7424 	int vcpi, pbn_div, pbn, slot_num = 0;
7425 
7426 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7427 
7428 		aconnector = to_amdgpu_dm_connector(connector);
7429 
7430 		if (!aconnector->port)
7431 			continue;
7432 
7433 		if (!new_con_state || !new_con_state->crtc)
7434 			continue;
7435 
7436 		dm_conn_state = to_dm_connector_state(new_con_state);
7437 
7438 		for (j = 0; j < dc_state->stream_count; j++) {
7439 			stream = dc_state->streams[j];
7440 			if (!stream)
7441 				continue;
7442 
7443 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7444 				break;
7445 
7446 			stream = NULL;
7447 		}
7448 
7449 		if (!stream)
7450 			continue;
7451 
7452 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7453 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7454 		for (j = 0; j < dc_state->stream_count; j++) {
7455 			if (vars[j].aconnector == aconnector) {
7456 				pbn = vars[j].pbn;
7457 				break;
7458 			}
7459 		}
7460 
7461 		if (j == dc_state->stream_count)
7462 			continue;
7463 
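		/* Round the stream's PBN up to a whole number of time slots. */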
7464 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7465 
7466 		if (stream->timing.flags.DSC != 1) {
7467 			dm_conn_state->pbn = pbn;
7468 			dm_conn_state->vcpi_slots = slot_num;
7469 
7470 			drm_dp_mst_atomic_enable_dsc(state,
7471 						     aconnector->port,
7472 						     dm_conn_state->pbn,
7473 						     0,
7474 						     false);
7475 			continue;
7476 		}
7477 
7478 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7479 						    aconnector->port,
7480 						    pbn, pbn_div,
7481 						    true);
7482 		if (vcpi < 0)
7483 			return vcpi;
7484 
7485 		dm_conn_state->pbn = pbn;
7486 		dm_conn_state->vcpi_slots = vcpi;
7487 	}
7488 	return 0;
7489 }
7490 #endif
7491 
7492 static void dm_drm_plane_reset(struct drm_plane *plane)
7493 {
7494 	struct dm_plane_state *amdgpu_state = NULL;
7495 
7496 	if (plane->state)
7497 		plane->funcs->atomic_destroy_state(plane, plane->state);
7498 
7499 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7500 	WARN_ON(amdgpu_state == NULL);
7501 
7502 	if (amdgpu_state)
7503 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7504 }
7505 
7506 static struct drm_plane_state *
7507 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7508 {
7509 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7510 
7511 	old_dm_plane_state = to_dm_plane_state(plane->state);
7512 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7513 	if (!dm_plane_state)
7514 		return NULL;
7515 
7516 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7517 
7518 	if (old_dm_plane_state->dc_state) {
7519 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7520 		dc_plane_state_retain(dm_plane_state->dc_state);
7521 	}
7522 
7523 	return &dm_plane_state->base;
7524 }
7525 
7526 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7527 				struct drm_plane_state *state)
7528 {
7529 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7530 
7531 	if (dm_plane_state->dc_state)
7532 		dc_plane_state_release(dm_plane_state->dc_state);
7533 
7534 	drm_atomic_helper_plane_destroy_state(plane, state);
7535 }
7536 
7537 static const struct drm_plane_funcs dm_plane_funcs = {
7538 	.update_plane	= drm_atomic_helper_update_plane,
7539 	.disable_plane	= drm_atomic_helper_disable_plane,
7540 	.destroy	= drm_primary_helper_destroy,
7541 	.reset = dm_drm_plane_reset,
7542 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7543 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7544 	.format_mod_supported = dm_plane_format_mod_supported,
7545 };
7546 
7547 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7548 				      struct drm_plane_state *new_state)
7549 {
7550 	struct amdgpu_framebuffer *afb;
7551 	struct drm_gem_object *obj;
7552 	struct amdgpu_device *adev;
7553 	struct amdgpu_bo *rbo;
7554 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7555 	struct list_head list;
7556 	struct ttm_validate_buffer tv;
7557 	struct ww_acquire_ctx ticket;
7558 	uint32_t domain;
7559 	int r;
7560 
7561 	if (!new_state->fb) {
7562 		DRM_DEBUG_KMS("No FB bound\n");
7563 		return 0;
7564 	}
7565 
7566 	afb = to_amdgpu_framebuffer(new_state->fb);
7567 	obj = new_state->fb->obj[0];
7568 	rbo = gem_to_amdgpu_bo(obj);
7569 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7570 	INIT_LIST_HEAD(&list);
7571 
7572 	tv.bo = &rbo->tbo;
7573 	tv.num_shared = 1;
7574 	list_add(&tv.head, &list);
7575 
7576 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7577 	if (r) {
7578 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7579 		return r;
7580 	}
7581 
7582 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7583 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7584 	else
7585 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7586 
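	/* Pin the BO so its GPU address stays valid while it is scanned out. */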
7587 	r = amdgpu_bo_pin(rbo, domain);
7588 	if (unlikely(r != 0)) {
7589 		if (r != -ERESTARTSYS)
7590 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7591 		ttm_eu_backoff_reservation(&ticket, &list);
7592 		return r;
7593 	}
7594 
7595 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7596 	if (unlikely(r != 0)) {
7597 		amdgpu_bo_unpin(rbo);
7598 		ttm_eu_backoff_reservation(&ticket, &list);
7599 		DRM_ERROR("%p bind failed\n", rbo);
7600 		return r;
7601 	}
7602 
7603 	ttm_eu_backoff_reservation(&ticket, &list);
7604 
7605 	afb->address = amdgpu_bo_gpu_offset(rbo);
7606 
7607 	amdgpu_bo_ref(rbo);
7608 
7609 	/**
7610 	 * We don't do surface updates on planes that have been newly created,
7611 	 * but we also don't have the afb->address during atomic check.
7612 	 *
7613 	 * Fill in buffer attributes depending on the address here, but only on
7614 	 * newly created planes since they're not being used by DC yet and this
7615 	 * won't modify global state.
7616 	 */
7617 	dm_plane_state_old = to_dm_plane_state(plane->state);
7618 	dm_plane_state_new = to_dm_plane_state(new_state);
7619 
7620 	if (dm_plane_state_new->dc_state &&
7621 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7622 		struct dc_plane_state *plane_state =
7623 			dm_plane_state_new->dc_state;
7624 		bool force_disable_dcc = !plane_state->dcc.enable;
7625 
7626 		fill_plane_buffer_attributes(
7627 			adev, afb, plane_state->format, plane_state->rotation,
7628 			afb->tiling_flags,
7629 			&plane_state->tiling_info, &plane_state->plane_size,
7630 			&plane_state->dcc, &plane_state->address,
7631 			afb->tmz_surface, force_disable_dcc);
7632 	}
7633 
7634 	return 0;
7635 }
7636 
7637 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7638 				       struct drm_plane_state *old_state)
7639 {
7640 	struct amdgpu_bo *rbo;
7641 	int r;
7642 
7643 	if (!old_state->fb)
7644 		return;
7645 
7646 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7647 	r = amdgpu_bo_reserve(rbo, false);
7648 	if (unlikely(r)) {
7649 		DRM_ERROR("failed to reserve rbo before unpin\n");
7650 		return;
7651 	}
7652 
7653 	amdgpu_bo_unpin(rbo);
7654 	amdgpu_bo_unreserve(rbo);
7655 	amdgpu_bo_unref(&rbo);
7656 }
7657 
7658 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7659 				       struct drm_crtc_state *new_crtc_state)
7660 {
7661 	struct drm_framebuffer *fb = state->fb;
7662 	int min_downscale, max_upscale;
7663 	int min_scale = 0;
7664 	int max_scale = INT_MAX;
7665 
7666 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7667 	if (fb && state->crtc) {
7668 		/* Validate viewport to cover the case when only the position changes */
7669 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7670 			int viewport_width = state->crtc_w;
7671 			int viewport_height = state->crtc_h;
7672 
7673 			if (state->crtc_x < 0)
7674 				viewport_width += state->crtc_x;
7675 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7676 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7677 
7678 			if (state->crtc_y < 0)
7679 				viewport_height += state->crtc_y;
7680 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7681 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7682 
7683 			if (viewport_width < 0 || viewport_height < 0) {
7684 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7685 				return -EINVAL;
7686 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7687 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7688 				return -EINVAL;
7689 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7690 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7691 				return -EINVAL;
7692 			}
7693 
7694 		}
7695 
7696 		/* Get min/max allowed scaling factors from plane caps. */
7697 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7698 					     &min_downscale, &max_upscale);
7699 		/*
7700 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7701 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7702 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7703 		 */
7704 		min_scale = (1000 << 16) / max_upscale;
7705 		max_scale = (1000 << 16) / min_downscale;
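		/*
		 * Example: a DC max_upscale of 16000 (16x) yields
		 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 0.0625 in
		 * 16.16 fixed point.
		 */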
7706 	}
7707 
7708 	return drm_atomic_helper_check_plane_state(
7709 		state, new_crtc_state, min_scale, max_scale, true, true);
7710 }
7711 
7712 static int dm_plane_atomic_check(struct drm_plane *plane,
7713 				 struct drm_atomic_state *state)
7714 {
7715 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7716 										 plane);
7717 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7718 	struct dc *dc = adev->dm.dc;
7719 	struct dm_plane_state *dm_plane_state;
7720 	struct dc_scaling_info scaling_info;
7721 	struct drm_crtc_state *new_crtc_state;
7722 	int ret;
7723 
7724 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7725 
7726 	dm_plane_state = to_dm_plane_state(new_plane_state);
7727 
7728 	if (!dm_plane_state->dc_state)
7729 		return 0;
7730 
7731 	new_crtc_state =
7732 		drm_atomic_get_new_crtc_state(state,
7733 					      new_plane_state->crtc);
7734 	if (!new_crtc_state)
7735 		return -EINVAL;
7736 
7737 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7738 	if (ret)
7739 		return ret;
7740 
7741 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7742 	if (ret)
7743 		return ret;
7744 
7745 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7746 		return 0;
7747 
7748 	return -EINVAL;
7749 }
7750 
7751 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7752 				       struct drm_atomic_state *state)
7753 {
7754 	/* Only support async updates on cursor planes. */
7755 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7756 		return -EINVAL;
7757 
7758 	return 0;
7759 }
7760 
7761 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7762 					 struct drm_atomic_state *state)
7763 {
7764 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7765 									   plane);
7766 	struct drm_plane_state *old_state =
7767 		drm_atomic_get_old_plane_state(state, plane);
7768 
7769 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7770 
7771 	swap(plane->state->fb, new_state->fb);
7772 
7773 	plane->state->src_x = new_state->src_x;
7774 	plane->state->src_y = new_state->src_y;
7775 	plane->state->src_w = new_state->src_w;
7776 	plane->state->src_h = new_state->src_h;
7777 	plane->state->crtc_x = new_state->crtc_x;
7778 	plane->state->crtc_y = new_state->crtc_y;
7779 	plane->state->crtc_w = new_state->crtc_w;
7780 	plane->state->crtc_h = new_state->crtc_h;
7781 
7782 	handle_cursor_update(plane, old_state);
7783 }
7784 
7785 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7786 	.prepare_fb = dm_plane_helper_prepare_fb,
7787 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7788 	.atomic_check = dm_plane_atomic_check,
7789 	.atomic_async_check = dm_plane_atomic_async_check,
7790 	.atomic_async_update = dm_plane_atomic_async_update
7791 };
7792 
7793 /*
7794  * TODO: these are currently initialized to RGB formats only.
7795  * For future use cases we should either initialize them dynamically based on
7796  * plane capabilities, or initialize this array to all formats, so the internal
7797  * drm check will succeed, and let DC implement the proper check.
7798  */
7799 static const uint32_t rgb_formats[] = {
7800 	DRM_FORMAT_XRGB8888,
7801 	DRM_FORMAT_ARGB8888,
7802 	DRM_FORMAT_RGBA8888,
7803 	DRM_FORMAT_XRGB2101010,
7804 	DRM_FORMAT_XBGR2101010,
7805 	DRM_FORMAT_ARGB2101010,
7806 	DRM_FORMAT_ABGR2101010,
7807 	DRM_FORMAT_XRGB16161616,
7808 	DRM_FORMAT_XBGR16161616,
7809 	DRM_FORMAT_ARGB16161616,
7810 	DRM_FORMAT_ABGR16161616,
7811 	DRM_FORMAT_XBGR8888,
7812 	DRM_FORMAT_ABGR8888,
7813 	DRM_FORMAT_RGB565,
7814 };
7815 
7816 static const uint32_t overlay_formats[] = {
7817 	DRM_FORMAT_XRGB8888,
7818 	DRM_FORMAT_ARGB8888,
7819 	DRM_FORMAT_RGBA8888,
7820 	DRM_FORMAT_XBGR8888,
7821 	DRM_FORMAT_ABGR8888,
7822 	DRM_FORMAT_RGB565
7823 };
7824 
7825 static const u32 cursor_formats[] = {
7826 	DRM_FORMAT_ARGB8888
7827 };
7828 
7829 static int get_plane_formats(const struct drm_plane *plane,
7830 			     const struct dc_plane_cap *plane_cap,
7831 			     uint32_t *formats, int max_formats)
7832 {
7833 	int i, num_formats = 0;
7834 
7835 	/*
7836 	 * TODO: Query support for each group of formats directly from
7837 	 * DC plane caps. This will require adding more formats to the
7838 	 * caps list.
7839 	 */
7840 
7841 	switch (plane->type) {
7842 	case DRM_PLANE_TYPE_PRIMARY:
7843 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7844 			if (num_formats >= max_formats)
7845 				break;
7846 
7847 			formats[num_formats++] = rgb_formats[i];
7848 		}
7849 
7850 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7851 			formats[num_formats++] = DRM_FORMAT_NV12;
7852 		if (plane_cap && plane_cap->pixel_format_support.p010)
7853 			formats[num_formats++] = DRM_FORMAT_P010;
7854 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7855 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7856 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7857 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7858 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7859 		}
7860 		break;
7861 
7862 	case DRM_PLANE_TYPE_OVERLAY:
7863 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7864 			if (num_formats >= max_formats)
7865 				break;
7866 
7867 			formats[num_formats++] = overlay_formats[i];
7868 		}
7869 		break;
7870 
7871 	case DRM_PLANE_TYPE_CURSOR:
7872 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7873 			if (num_formats >= max_formats)
7874 				break;
7875 
7876 			formats[num_formats++] = cursor_formats[i];
7877 		}
7878 		break;
7879 	}
7880 
7881 	return num_formats;
7882 }
7883 
7884 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7885 				struct drm_plane *plane,
7886 				unsigned long possible_crtcs,
7887 				const struct dc_plane_cap *plane_cap)
7888 {
7889 	uint32_t formats[32];
7890 	int num_formats;
7891 	int res = -EPERM;
7892 	unsigned int supported_rotations;
7893 	uint64_t *modifiers = NULL;
7894 
7895 	num_formats = get_plane_formats(plane, plane_cap, formats,
7896 					ARRAY_SIZE(formats));
7897 
7898 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7899 	if (res)
7900 		return res;
7901 
7902 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7903 				       &dm_plane_funcs, formats, num_formats,
7904 				       modifiers, plane->type, NULL);
7905 	kfree(modifiers);
7906 	if (res)
7907 		return res;
7908 
7909 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7910 	    plane_cap && plane_cap->per_pixel_alpha) {
7911 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7912 					  BIT(DRM_MODE_BLEND_PREMULTI);
7913 
7914 		drm_plane_create_alpha_property(plane);
7915 		drm_plane_create_blend_mode_property(plane, blend_caps);
7916 	}
7917 
7918 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7919 	    plane_cap &&
7920 	    (plane_cap->pixel_format_support.nv12 ||
7921 	     plane_cap->pixel_format_support.p010)) {
7922 		/* This only affects YUV formats. */
7923 		drm_plane_create_color_properties(
7924 			plane,
7925 			BIT(DRM_COLOR_YCBCR_BT601) |
7926 			BIT(DRM_COLOR_YCBCR_BT709) |
7927 			BIT(DRM_COLOR_YCBCR_BT2020),
7928 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7929 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7930 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7931 	}
7932 
7933 	supported_rotations =
7934 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7935 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7936 
7937 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7938 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7939 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7940 						   supported_rotations);
7941 
7942 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7943 
7944 	/* Create (reset) the plane state */
7945 	if (plane->funcs->reset)
7946 		plane->funcs->reset(plane);
7947 
7948 	return 0;
7949 }
7950 
7951 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7952 			       struct drm_plane *plane,
7953 			       uint32_t crtc_index)
7954 {
7955 	struct amdgpu_crtc *acrtc = NULL;
7956 	struct drm_plane *cursor_plane;
7957 
7958 	int res = -ENOMEM;
7959 
7960 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7961 	if (!cursor_plane)
7962 		goto fail;
7963 
7964 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7965 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7966 
7967 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7968 	if (!acrtc)
7969 		goto fail;
7970 
7971 	res = drm_crtc_init_with_planes(
7972 			dm->ddev,
7973 			&acrtc->base,
7974 			plane,
7975 			cursor_plane,
7976 			&amdgpu_dm_crtc_funcs, NULL);
7977 
7978 	if (res)
7979 		goto fail;
7980 
7981 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7982 
7983 	/* Create (reset) the crtc state */
7984 	if (acrtc->base.funcs->reset)
7985 		acrtc->base.funcs->reset(&acrtc->base);
7986 
7987 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7988 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7989 
7990 	acrtc->crtc_id = crtc_index;
7991 	acrtc->base.enabled = false;
7992 	acrtc->otg_inst = -1;
7993 
7994 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7995 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7996 				   true, MAX_COLOR_LUT_ENTRIES);
7997 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7998 
7999 	return 0;
8000 
8001 fail:
8002 	kfree(acrtc);
8003 	kfree(cursor_plane);
8004 	return res;
8005 }
8006 
8007 
8008 static int to_drm_connector_type(enum signal_type st)
8009 {
8010 	switch (st) {
8011 	case SIGNAL_TYPE_HDMI_TYPE_A:
8012 		return DRM_MODE_CONNECTOR_HDMIA;
8013 	case SIGNAL_TYPE_EDP:
8014 		return DRM_MODE_CONNECTOR_eDP;
8015 	case SIGNAL_TYPE_LVDS:
8016 		return DRM_MODE_CONNECTOR_LVDS;
8017 	case SIGNAL_TYPE_RGB:
8018 		return DRM_MODE_CONNECTOR_VGA;
8019 	case SIGNAL_TYPE_DISPLAY_PORT:
8020 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8021 		return DRM_MODE_CONNECTOR_DisplayPort;
8022 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8023 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8024 		return DRM_MODE_CONNECTOR_DVID;
8025 	case SIGNAL_TYPE_VIRTUAL:
8026 		return DRM_MODE_CONNECTOR_VIRTUAL;
8027 
8028 	default:
8029 		return DRM_MODE_CONNECTOR_Unknown;
8030 	}
8031 }
8032 
8033 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8034 {
8035 	struct drm_encoder *encoder;
8036 
8037 	/* There is only one encoder per connector */
8038 	drm_connector_for_each_possible_encoder(connector, encoder)
8039 		return encoder;
8040 
8041 	return NULL;
8042 }
8043 
8044 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8045 {
8046 	struct drm_encoder *encoder;
8047 	struct amdgpu_encoder *amdgpu_encoder;
8048 
8049 	encoder = amdgpu_dm_connector_to_encoder(connector);
8050 
8051 	if (encoder == NULL)
8052 		return;
8053 
8054 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8055 
8056 	amdgpu_encoder->native_mode.clock = 0;
8057 
8058 	if (!list_empty(&connector->probed_modes)) {
8059 		struct drm_display_mode *preferred_mode = NULL;
8060 
8061 		list_for_each_entry(preferred_mode,
8062 				    &connector->probed_modes,
8063 				    head) {
8064 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8065 				amdgpu_encoder->native_mode = *preferred_mode;
8066 
8067 			break;
8068 		}
8069 
8070 	}
8071 }
8072 
8073 static struct drm_display_mode *
8074 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8075 			     char *name,
8076 			     int hdisplay, int vdisplay)
8077 {
8078 	struct drm_device *dev = encoder->dev;
8079 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8080 	struct drm_display_mode *mode = NULL;
8081 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8082 
8083 	mode = drm_mode_duplicate(dev, native_mode);
8084 
8085 	if (mode == NULL)
8086 		return NULL;
8087 
8088 	mode->hdisplay = hdisplay;
8089 	mode->vdisplay = vdisplay;
8090 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8091 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8092 
8093 	return mode;
8094 
8095 }
8096 
8097 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8098 						 struct drm_connector *connector)
8099 {
8100 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8101 	struct drm_display_mode *mode = NULL;
8102 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8103 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8104 				to_amdgpu_dm_connector(connector);
8105 	int i;
8106 	int n;
8107 	struct mode_size {
8108 		char name[DRM_DISPLAY_MODE_LEN];
8109 		int w;
8110 		int h;
8111 	} common_modes[] = {
8112 		{  "640x480",  640,  480},
8113 		{  "800x600",  800,  600},
8114 		{ "1024x768", 1024,  768},
8115 		{ "1280x720", 1280,  720},
8116 		{ "1280x800", 1280,  800},
8117 		{"1280x1024", 1280, 1024},
8118 		{ "1440x900", 1440,  900},
8119 		{"1680x1050", 1680, 1050},
8120 		{"1600x1200", 1600, 1200},
8121 		{"1920x1080", 1920, 1080},
8122 		{"1920x1200", 1920, 1200}
8123 	};
8124 
8125 	n = ARRAY_SIZE(common_modes);
8126 
8127 	for (i = 0; i < n; i++) {
8128 		struct drm_display_mode *curmode = NULL;
8129 		bool mode_existed = false;
8130 
8131 		if (common_modes[i].w > native_mode->hdisplay ||
8132 		    common_modes[i].h > native_mode->vdisplay ||
8133 		   (common_modes[i].w == native_mode->hdisplay &&
8134 		    common_modes[i].h == native_mode->vdisplay))
8135 			continue;
8136 
8137 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8138 			if (common_modes[i].w == curmode->hdisplay &&
8139 			    common_modes[i].h == curmode->vdisplay) {
8140 				mode_existed = true;
8141 				break;
8142 			}
8143 		}
8144 
8145 		if (mode_existed)
8146 			continue;
8147 
8148 		mode = amdgpu_dm_create_common_mode(encoder,
8149 				common_modes[i].name, common_modes[i].w,
8150 				common_modes[i].h);
8151 		if (!mode)
8152 			continue;
8153 
8154 		drm_mode_probed_add(connector, mode);
8155 		amdgpu_dm_connector->num_modes++;
8156 	}
8157 }
8158 
8159 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8160 {
8161 	struct drm_encoder *encoder;
8162 	struct amdgpu_encoder *amdgpu_encoder;
8163 	const struct drm_display_mode *native_mode;
8164 
8165 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8166 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8167 		return;
8168 
8169 	encoder = amdgpu_dm_connector_to_encoder(connector);
8170 	if (!encoder)
8171 		return;
8172 
8173 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8174 
8175 	native_mode = &amdgpu_encoder->native_mode;
8176 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8177 		return;
8178 
8179 	drm_connector_set_panel_orientation_with_quirk(connector,
8180 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8181 						       native_mode->hdisplay,
8182 						       native_mode->vdisplay);
8183 }
8184 
8185 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8186 					      struct edid *edid)
8187 {
8188 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8189 			to_amdgpu_dm_connector(connector);
8190 
8191 	if (edid) {
8192 		/* empty probed_modes */
8193 		INIT_LIST_HEAD(&connector->probed_modes);
8194 		amdgpu_dm_connector->num_modes =
8195 				drm_add_edid_modes(connector, edid);
8196 
8197 		/* Sort the probed modes before calling
8198 		 * amdgpu_dm_get_native_mode(), since an EDID can have
8199 		 * more than one preferred mode. Modes that appear later
8200 		 * in the probed mode list could be of a higher, still
8201 		 * preferred, resolution. For example, a 3840x2160 preferred
8202 		 * timing in the base EDID and a 4096x2160 preferred
8203 		 * resolution in a later DID extension block.
8204 		 */
8205 		drm_mode_sort(&connector->probed_modes);
8206 		amdgpu_dm_get_native_mode(connector);
8207 
8208 		/* Freesync capabilities are reset by calling
8209 		 * drm_add_edid_modes() and need to be
8210 		 * restored here.
8211 		 */
8212 		amdgpu_dm_update_freesync_caps(connector, edid);
8213 
8214 		amdgpu_set_panel_orientation(connector);
8215 	} else {
8216 		amdgpu_dm_connector->num_modes = 0;
8217 	}
8218 }
8219 
8220 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8221 			      struct drm_display_mode *mode)
8222 {
8223 	struct drm_display_mode *m;
8224 
8225 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8226 		if (drm_mode_equal(m, mode))
8227 			return true;
8228 	}
8229 
8230 	return false;
8231 }
8232 
8233 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8234 {
8235 	const struct drm_display_mode *m;
8236 	struct drm_display_mode *new_mode;
8237 	uint i;
8238 	uint32_t new_modes_count = 0;
8239 
8240 	/* Standard FPS values
8241 	 *
8242 	 * 23.976       - TV/NTSC
8243 	 * 24 	        - Cinema
8244 	 * 25 	        - TV/PAL
8245 	 * 29.97        - TV/NTSC
8246 	 * 30 	        - TV/NTSC
8247 	 * 48 	        - Cinema HFR
8248 	 * 50 	        - TV/PAL
8249 	 * 60 	        - Commonly used
8250 	 * 48,72,96,120 - Multiples of 24
8251 	 */
8252 	static const uint32_t common_rates[] = {
8253 		23976, 24000, 25000, 29970, 30000,
8254 		48000, 50000, 60000, 72000, 96000, 120000
8255 	};
8256 
8257 	/*
8258 	 * Find mode with highest refresh rate with the same resolution
8259 	 * as the preferred mode. Some monitors report a preferred mode
8260 	 * with lower resolution than the highest refresh rate supported.
8261 	 */
8262 
8263 	m = get_highest_refresh_rate_mode(aconnector, true);
8264 	if (!m)
8265 		return 0;
8266 
8267 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8268 		uint64_t target_vtotal, target_vtotal_diff;
8269 		uint64_t num, den;
8270 
8271 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8272 			continue;
8273 
8274 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8275 		    common_rates[i] > aconnector->max_vfreq * 1000)
8276 			continue;
8277 
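		/*
		 * Solve refresh = clock * 1000 / (htotal * vtotal) for the
		 * vtotal that hits the target rate (common_rates[] is in mHz),
		 * keeping the pixel clock and htotal of the base mode fixed.
		 */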
8278 		num = (unsigned long long)m->clock * 1000 * 1000;
8279 		den = common_rates[i] * (unsigned long long)m->htotal;
8280 		target_vtotal = div_u64(num, den);
8281 		target_vtotal_diff = target_vtotal - m->vtotal;
8282 
8283 		/* Check for illegal modes */
8284 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8285 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8286 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8287 			continue;
8288 
8289 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8290 		if (!new_mode)
8291 			goto out;
8292 
8293 		new_mode->vtotal += (u16)target_vtotal_diff;
8294 		new_mode->vsync_start += (u16)target_vtotal_diff;
8295 		new_mode->vsync_end += (u16)target_vtotal_diff;
8296 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8297 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8298 
8299 		if (!is_duplicate_mode(aconnector, new_mode)) {
8300 			drm_mode_probed_add(&aconnector->base, new_mode);
8301 			new_modes_count += 1;
8302 		} else
8303 			drm_mode_destroy(aconnector->base.dev, new_mode);
8304 	}
8305  out:
8306 	return new_modes_count;
8307 }
8308 
8309 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8310 						   struct edid *edid)
8311 {
8312 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8313 		to_amdgpu_dm_connector(connector);
8314 
8315 	if (!edid)
8316 		return;
8317 
8318 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8319 		amdgpu_dm_connector->num_modes +=
8320 			add_fs_modes(amdgpu_dm_connector);
8321 }
8322 
8323 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8324 {
8325 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8326 			to_amdgpu_dm_connector(connector);
8327 	struct drm_encoder *encoder;
8328 	struct edid *edid = amdgpu_dm_connector->edid;
8329 
8330 	encoder = amdgpu_dm_connector_to_encoder(connector);
8331 
8332 	if (!drm_edid_is_valid(edid)) {
8333 		amdgpu_dm_connector->num_modes =
8334 				drm_add_modes_noedid(connector, 640, 480);
8335 	} else {
8336 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8337 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8338 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8339 	}
8340 	amdgpu_dm_fbc_init(connector);
8341 
8342 	return amdgpu_dm_connector->num_modes;
8343 }
8344 
8345 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8346 				     struct amdgpu_dm_connector *aconnector,
8347 				     int connector_type,
8348 				     struct dc_link *link,
8349 				     int link_index)
8350 {
8351 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8352 
8353 	/*
8354 	 * Some of the properties below require access to state, like bpc.
8355 	 * Allocate some default initial connector state with our reset helper.
8356 	 */
8357 	if (aconnector->base.funcs->reset)
8358 		aconnector->base.funcs->reset(&aconnector->base);
8359 
8360 	aconnector->connector_id = link_index;
8361 	aconnector->dc_link = link;
8362 	aconnector->base.interlace_allowed = false;
8363 	aconnector->base.doublescan_allowed = false;
8364 	aconnector->base.stereo_allowed = false;
8365 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8366 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8367 	aconnector->audio_inst = -1;
8368 	mutex_init(&aconnector->hpd_lock);
8369 
8370 	/*
8371 	 * configure support HPD hot plug connector_>polled default value is 0
8372 	 * Configure HPD hot plug support. The default value of connector->polled
8373 	 * is 0, which means HPD hot plug is not supported.
8374 	switch (connector_type) {
8375 	case DRM_MODE_CONNECTOR_HDMIA:
8376 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8377 		aconnector->base.ycbcr_420_allowed =
8378 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8379 		break;
8380 	case DRM_MODE_CONNECTOR_DisplayPort:
8381 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8382 		link->link_enc = link_enc_cfg_get_link_enc(link);
8383 		ASSERT(link->link_enc);
8384 		if (link->link_enc)
8385 			aconnector->base.ycbcr_420_allowed =
8386 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8387 		break;
8388 	case DRM_MODE_CONNECTOR_DVID:
8389 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8390 		break;
8391 	default:
8392 		break;
8393 	}
8394 
8395 	drm_object_attach_property(&aconnector->base.base,
8396 				dm->ddev->mode_config.scaling_mode_property,
8397 				DRM_MODE_SCALE_NONE);
8398 
8399 	drm_object_attach_property(&aconnector->base.base,
8400 				adev->mode_info.underscan_property,
8401 				UNDERSCAN_OFF);
8402 	drm_object_attach_property(&aconnector->base.base,
8403 				adev->mode_info.underscan_hborder_property,
8404 				0);
8405 	drm_object_attach_property(&aconnector->base.base,
8406 				adev->mode_info.underscan_vborder_property,
8407 				0);
8408 
8409 	if (!aconnector->mst_port)
8410 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8411 
8412 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8413 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8414 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8415 
8416 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8417 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8418 		drm_object_attach_property(&aconnector->base.base,
8419 				adev->mode_info.abm_level_property, 0);
8420 	}
8421 
8422 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8423 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8424 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8425 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8426 
8427 		if (!aconnector->mst_port)
8428 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8429 
8430 #ifdef CONFIG_DRM_AMD_DC_HDCP
8431 		if (adev->dm.hdcp_workqueue)
8432 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8433 #endif
8434 	}
8435 }
8436 
8437 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8438 			      struct i2c_msg *msgs, int num)
8439 {
8440 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8441 	struct ddc_service *ddc_service = i2c->ddc_service;
8442 	struct i2c_command cmd;
8443 	int i;
8444 	int result = -EIO;
8445 
8446 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8447 
8448 	if (!cmd.payloads)
8449 		return result;
8450 
8451 	cmd.number_of_payloads = num;
8452 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
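	/* Bus speed; assumed to be in kHz, i.e. 100 kHz standard-mode I2C. */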
8453 	cmd.speed = 100;
8454 
8455 	for (i = 0; i < num; i++) {
8456 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8457 		cmd.payloads[i].address = msgs[i].addr;
8458 		cmd.payloads[i].length = msgs[i].len;
8459 		cmd.payloads[i].data = msgs[i].buf;
8460 	}
8461 
8462 	if (dc_submit_i2c(
8463 			ddc_service->ctx->dc,
8464 			ddc_service->ddc_pin->hw_info.ddc_channel,
8465 			&cmd))
8466 		result = num;
8467 
8468 	kfree(cmd.payloads);
8469 	return result;
8470 }
8471 
8472 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8473 {
8474 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8475 }
8476 
8477 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8478 	.master_xfer = amdgpu_dm_i2c_xfer,
8479 	.functionality = amdgpu_dm_i2c_func,
8480 };
8481 
8482 static struct amdgpu_i2c_adapter *
8483 create_i2c(struct ddc_service *ddc_service,
8484 	   int link_index,
8485 	   int *res)
8486 {
8487 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8488 	struct amdgpu_i2c_adapter *i2c;
8489 
8490 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8491 	if (!i2c)
8492 		return NULL;
8493 	i2c->base.owner = THIS_MODULE;
8494 	i2c->base.class = I2C_CLASS_DDC;
8495 	i2c->base.dev.parent = &adev->pdev->dev;
8496 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8497 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8498 	i2c_set_adapdata(&i2c->base, i2c);
8499 	i2c->ddc_service = ddc_service;
8500 	if (i2c->ddc_service->ddc_pin)
8501 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8502 
8503 	return i2c;
8504 }
8505 
8506 
8507 /*
8508  * Note: this function assumes that dc_link_detect() was called for the
8509  * dc_link which will be represented by this aconnector.
8510  */
8511 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8512 				    struct amdgpu_dm_connector *aconnector,
8513 				    uint32_t link_index,
8514 				    struct amdgpu_encoder *aencoder)
8515 {
8516 	int res = 0;
8517 	int connector_type;
8518 	struct dc *dc = dm->dc;
8519 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8520 	struct amdgpu_i2c_adapter *i2c;
8521 
8522 	link->priv = aconnector;
8523 
8524 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8525 
8526 	i2c = create_i2c(link->ddc, link->link_index, &res);
8527 	if (!i2c) {
8528 		DRM_ERROR("Failed to create i2c adapter data\n");
8529 		return -ENOMEM;
8530 	}
8531 
8532 	aconnector->i2c = i2c;
8533 	res = i2c_add_adapter(&i2c->base);
8534 
8535 	if (res) {
8536 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8537 		goto out_free;
8538 	}
8539 
8540 	connector_type = to_drm_connector_type(link->connector_signal);
8541 
8542 	res = drm_connector_init_with_ddc(
8543 			dm->ddev,
8544 			&aconnector->base,
8545 			&amdgpu_dm_connector_funcs,
8546 			connector_type,
8547 			&i2c->base);
8548 
8549 	if (res) {
8550 		DRM_ERROR("connector_init failed\n");
8551 		aconnector->connector_id = -1;
8552 		goto out_free;
8553 	}
8554 
8555 	drm_connector_helper_add(
8556 			&aconnector->base,
8557 			&amdgpu_dm_connector_helper_funcs);
8558 
8559 	amdgpu_dm_connector_init_helper(
8560 		dm,
8561 		aconnector,
8562 		connector_type,
8563 		link,
8564 		link_index);
8565 
8566 	drm_connector_attach_encoder(
8567 		&aconnector->base, &aencoder->base);
8568 
8569 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8570 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8571 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8572 
8573 out_free:
8574 	if (res) {
8575 		kfree(i2c);
8576 		aconnector->i2c = NULL;
8577 	}
8578 	return res;
8579 }
8580 
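/*
 * Build a possible_crtcs bitmask with one bit set per CRTC, capped at six;
 * e.g. num_crtc == 4 yields 0xf.
 */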
8581 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8582 {
8583 	switch (adev->mode_info.num_crtc) {
8584 	case 1:
8585 		return 0x1;
8586 	case 2:
8587 		return 0x3;
8588 	case 3:
8589 		return 0x7;
8590 	case 4:
8591 		return 0xf;
8592 	case 5:
8593 		return 0x1f;
8594 	case 6:
8595 	default:
8596 		return 0x3f;
8597 	}
8598 }
8599 
8600 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8601 				  struct amdgpu_encoder *aencoder,
8602 				  uint32_t link_index)
8603 {
8604 	struct amdgpu_device *adev = drm_to_adev(dev);
8605 
8606 	int res = drm_encoder_init(dev,
8607 				   &aencoder->base,
8608 				   &amdgpu_dm_encoder_funcs,
8609 				   DRM_MODE_ENCODER_TMDS,
8610 				   NULL);
8611 
8612 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8613 
8614 	if (!res)
8615 		aencoder->encoder_id = link_index;
8616 	else
8617 		aencoder->encoder_id = -1;
8618 
8619 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8620 
8621 	return res;
8622 }
8623 
8624 static void manage_dm_interrupts(struct amdgpu_device *adev,
8625 				 struct amdgpu_crtc *acrtc,
8626 				 bool enable)
8627 {
8628 	/*
8629 	 * We have no guarantee that the frontend index maps to the same
8630 	 * backend index - some even map to more than one.
8631 	 *
8632 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8633 	 */
8634 	int irq_type =
8635 		amdgpu_display_crtc_idx_to_irq_type(
8636 			adev,
8637 			acrtc->crtc_id);
8638 
8639 	if (enable) {
8640 		drm_crtc_vblank_on(&acrtc->base);
8641 		amdgpu_irq_get(
8642 			adev,
8643 			&adev->pageflip_irq,
8644 			irq_type);
8645 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8646 		amdgpu_irq_get(
8647 			adev,
8648 			&adev->vline0_irq,
8649 			irq_type);
8650 #endif
8651 	} else {
8652 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8653 		amdgpu_irq_put(
8654 			adev,
8655 			&adev->vline0_irq,
8656 			irq_type);
8657 #endif
8658 		amdgpu_irq_put(
8659 			adev,
8660 			&adev->pageflip_irq,
8661 			irq_type);
8662 		drm_crtc_vblank_off(&acrtc->base);
8663 	}
8664 }
8665 
8666 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8667 				      struct amdgpu_crtc *acrtc)
8668 {
8669 	int irq_type =
8670 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8671 
8672 	/*
8673 	 * Read the current IRQ state and force a reapplication of the
8674 	 * setting to hardware.
8675 	 */
8676 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8677 }
8678 
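/*
 * A scaling change is reported when the scaling mode differs, when underscan
 * is toggled while non-zero borders are in use, or when the border sizes
 * themselves change.
 */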
8679 static bool
8680 is_scaling_state_different(const struct dm_connector_state *dm_state,
8681 			   const struct dm_connector_state *old_dm_state)
8682 {
8683 	if (dm_state->scaling != old_dm_state->scaling)
8684 		return true;
8685 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8686 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8687 			return true;
8688 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8689 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8690 			return true;
8691 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8692 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8693 		return true;
8694 	return false;
8695 }
8696 
8697 #ifdef CONFIG_DRM_AMD_DC_HDCP
8698 static bool is_content_protection_different(struct drm_connector_state *state,
8699 					    const struct drm_connector_state *old_state,
8700 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8701 {
8702 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8703 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8704 
8705 	/* Handle: Type0/1 change */
8706 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8707 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8708 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8709 		return true;
8710 	}
8711 
8712 	/* CP is being re-enabled, ignore this
8713 	 *
8714 	 * Handles:	ENABLED -> DESIRED
8715 	 */
8716 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8717 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8718 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8719 		return false;
8720 	}
8721 
8722 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8723 	 *
8724 	 * Handles:	UNDESIRED -> ENABLED
8725 	 */
8726 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8727 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8728 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8729 
8730 	/* Stream removed and re-enabled
8731 	 *
8732 	 * Can sometimes overlap with the HPD case,
8733 	 * thus set update_hdcp to false to avoid
8734 	 * setting HDCP multiple times.
8735 	 *
8736 	 * Handles:	DESIRED -> DESIRED (Special case)
8737 	 */
8738 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8739 		state->crtc && state->crtc->enabled &&
8740 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8741 		dm_con_state->update_hdcp = false;
8742 		return true;
8743 	}
8744 
8745 	/* Hot-plug, headless s3, dpms
8746 	 *
8747 	 * Only start HDCP if the display is connected/enabled.
8748 	 * update_hdcp flag will be set to false until the next
8749 	 * HPD comes in.
8750 	 *
8751 	 * Handles:	DESIRED -> DESIRED (Special case)
8752 	 */
8753 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8754 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8755 		dm_con_state->update_hdcp = false;
8756 		return true;
8757 	}
8758 
8759 	/*
8760 	 * Handles:	UNDESIRED -> UNDESIRED
8761 	 *		DESIRED -> DESIRED
8762 	 *		ENABLED -> ENABLED
8763 	 */
8764 	if (old_state->content_protection == state->content_protection)
8765 		return false;
8766 
8767 	/*
8768 	 * Handles:	UNDESIRED -> DESIRED
8769 	 *		DESIRED -> UNDESIRED
8770 	 *		ENABLED -> UNDESIRED
8771 	 */
8772 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8773 		return true;
8774 
8775 	/*
8776 	 * Handles:	DESIRED -> ENABLED
8777 	 */
8778 	return false;
8779 }
8780 
8781 #endif
8782 static void remove_stream(struct amdgpu_device *adev,
8783 			  struct amdgpu_crtc *acrtc,
8784 			  struct dc_stream_state *stream)
8785 {
8786 	/* this is the update mode case */
8787 
8788 	acrtc->otg_inst = -1;
8789 	acrtc->enabled = false;
8790 }
8791 
8792 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8793 			       struct dc_cursor_position *position)
8794 {
8795 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8796 	int x, y;
8797 	int xorigin = 0, yorigin = 0;
8798 
8799 	if (!crtc || !plane->state->fb)
8800 		return 0;
8801 
8802 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8803 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8804 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8805 			  __func__,
8806 			  plane->state->crtc_w,
8807 			  plane->state->crtc_h);
8808 		return -EINVAL;
8809 	}
8810 
8811 	x = plane->state->crtc_x;
8812 	y = plane->state->crtc_y;
8813 
8814 	if (x <= -amdgpu_crtc->max_cursor_width ||
8815 	    y <= -amdgpu_crtc->max_cursor_height)
8816 		return 0;
8817 
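	/*
	 * Clamp negative coordinates to zero and carry the overshoot in the
	 * hotspot. Example: a cursor at x = -10 becomes x = 0 with
	 * x_hotspot = 10, so the visible part of the cursor stays aligned.
	 */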
8818 	if (x < 0) {
8819 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8820 		x = 0;
8821 	}
8822 	if (y < 0) {
8823 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8824 		y = 0;
8825 	}
8826 	position->enable = true;
8827 	position->translate_by_source = true;
8828 	position->x = x;
8829 	position->y = y;
8830 	position->x_hotspot = xorigin;
8831 	position->y_hotspot = yorigin;
8832 
8833 	return 0;
8834 }
8835 
8836 static void handle_cursor_update(struct drm_plane *plane,
8837 				 struct drm_plane_state *old_plane_state)
8838 {
8839 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8840 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8841 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8842 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8843 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8844 	uint64_t address = afb ? afb->address : 0;
8845 	struct dc_cursor_position position = {0};
8846 	struct dc_cursor_attributes attributes;
8847 	int ret;
8848 
8849 	if (!plane->state->fb && !old_plane_state->fb)
8850 		return;
8851 
8852 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8853 		      __func__,
8854 		      amdgpu_crtc->crtc_id,
8855 		      plane->state->crtc_w,
8856 		      plane->state->crtc_h);
8857 
8858 	ret = get_cursor_position(plane, crtc, &position);
8859 	if (ret)
8860 		return;
8861 
8862 	if (!position.enable) {
8863 		/* turn off cursor */
8864 		if (crtc_state && crtc_state->stream) {
8865 			mutex_lock(&adev->dm.dc_lock);
8866 			dc_stream_set_cursor_position(crtc_state->stream,
8867 						      &position);
8868 			mutex_unlock(&adev->dm.dc_lock);
8869 		}
8870 		return;
8871 	}
8872 
8873 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8874 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8875 
8876 	memset(&attributes, 0, sizeof(attributes));
8877 	attributes.address.high_part = upper_32_bits(address);
8878 	attributes.address.low_part  = lower_32_bits(address);
8879 	attributes.width             = plane->state->crtc_w;
8880 	attributes.height            = plane->state->crtc_h;
8881 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8882 	attributes.rotation_angle    = 0;
8883 	attributes.attribute_flags.value = 0;
8884 
8885 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8886 
8887 	if (crtc_state->stream) {
8888 		mutex_lock(&adev->dm.dc_lock);
8889 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8890 							 &attributes))
8891 			DRM_ERROR("DC failed to set cursor attributes\n");
8892 
8893 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8894 						   &position))
8895 			DRM_ERROR("DC failed to set cursor position\n");
8896 		mutex_unlock(&adev->dm.dc_lock);
8897 	}
8898 }
8899 
8900 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8901 {
8902 
8903 	assert_spin_locked(&acrtc->base.dev->event_lock);
8904 	WARN_ON(acrtc->event);
8905 
8906 	acrtc->event = acrtc->base.state->event;
8907 
8908 	/* Set the flip status */
8909 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8910 
8911 	/* Mark this event as consumed */
8912 	acrtc->base.state->event = NULL;
8913 
8914 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8915 		     acrtc->crtc_id);
8916 }
8917 
8918 static void update_freesync_state_on_stream(
8919 	struct amdgpu_display_manager *dm,
8920 	struct dm_crtc_state *new_crtc_state,
8921 	struct dc_stream_state *new_stream,
8922 	struct dc_plane_state *surface,
8923 	u32 flip_timestamp_in_us)
8924 {
8925 	struct mod_vrr_params vrr_params;
8926 	struct dc_info_packet vrr_infopacket = {0};
8927 	struct amdgpu_device *adev = dm->adev;
8928 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8929 	unsigned long flags;
8930 	bool pack_sdp_v1_3 = false;
8931 
8932 	if (!new_stream)
8933 		return;
8934 
8935 	/*
8936 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8937 	 * For now it's sufficient to just guard against these conditions.
8938 	 */
8939 
8940 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8941 		return;
8942 
8943 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8944 	vrr_params = acrtc->dm_irq_params.vrr_params;
8945 
8946 	if (surface) {
8947 		mod_freesync_handle_preflip(
8948 			dm->freesync_module,
8949 			surface,
8950 			new_stream,
8951 			flip_timestamp_in_us,
8952 			&vrr_params);
8953 
8954 		if (adev->family < AMDGPU_FAMILY_AI &&
8955 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8956 			mod_freesync_handle_v_update(dm->freesync_module,
8957 						     new_stream, &vrr_params);
8958 
8959 			/* Need to call this before the frame ends. */
8960 			dc_stream_adjust_vmin_vmax(dm->dc,
8961 						   new_crtc_state->stream,
8962 						   &vrr_params.adjust);
8963 		}
8964 	}
8965 
8966 	mod_freesync_build_vrr_infopacket(
8967 		dm->freesync_module,
8968 		new_stream,
8969 		&vrr_params,
8970 		PACKET_TYPE_VRR,
8971 		TRANSFER_FUNC_UNKNOWN,
8972 		&vrr_infopacket,
8973 		pack_sdp_v1_3);
8974 
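	/*
	 * Flag a timing change when the newly computed VRR adjust range differs
	 * from what the IRQ handler last saw, and an infopacket change when the
	 * packet payload differs.
	 */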
8975 	new_crtc_state->freesync_timing_changed |=
8976 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8977 			&vrr_params.adjust,
8978 			sizeof(vrr_params.adjust)) != 0);
8979 
8980 	new_crtc_state->freesync_vrr_info_changed |=
8981 		(memcmp(&new_crtc_state->vrr_infopacket,
8982 			&vrr_infopacket,
8983 			sizeof(vrr_infopacket)) != 0);
8984 
8985 	acrtc->dm_irq_params.vrr_params = vrr_params;
8986 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8987 
8988 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8989 	new_stream->vrr_infopacket = vrr_infopacket;
8990 
8991 	if (new_crtc_state->freesync_vrr_info_changed)
8992 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8993 			      new_crtc_state->base.crtc->base.id,
8994 			      (int)new_crtc_state->base.vrr_enabled,
8995 			      (int)vrr_params.state);
8996 
8997 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8998 }
8999 
9000 static void update_stream_irq_parameters(
9001 	struct amdgpu_display_manager *dm,
9002 	struct dm_crtc_state *new_crtc_state)
9003 {
9004 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9005 	struct mod_vrr_params vrr_params;
9006 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9007 	struct amdgpu_device *adev = dm->adev;
9008 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9009 	unsigned long flags;
9010 
9011 	if (!new_stream)
9012 		return;
9013 
9014 	/*
9015 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9016 	 * For now it's sufficient to just guard against these conditions.
9017 	 */
9018 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9019 		return;
9020 
9021 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9022 	vrr_params = acrtc->dm_irq_params.vrr_params;
9023 
9024 	if (new_crtc_state->vrr_supported &&
9025 	    config.min_refresh_in_uhz &&
9026 	    config.max_refresh_in_uhz) {
9027 		/*
9028 		 * if freesync compatible mode was set, config.state will be set
9029 		 * in atomic check
9030 		 */
9031 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9032 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9033 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9034 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9035 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9036 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9037 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9038 		} else {
9039 			config.state = new_crtc_state->base.vrr_enabled ?
9040 						     VRR_STATE_ACTIVE_VARIABLE :
9041 						     VRR_STATE_INACTIVE;
9042 		}
9043 	} else {
9044 		config.state = VRR_STATE_UNSUPPORTED;
9045 	}
9046 
9047 	mod_freesync_build_vrr_params(dm->freesync_module,
9048 				      new_stream,
9049 				      &config, &vrr_params);
9050 
9051 	new_crtc_state->freesync_timing_changed |=
9052 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9053 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9054 
9055 	new_crtc_state->freesync_config = config;
9056 	/* Copy state for access from DM IRQ handler */
9057 	acrtc->dm_irq_params.freesync_config = config;
9058 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9059 	acrtc->dm_irq_params.vrr_params = vrr_params;
9060 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9061 }
9062 
9063 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9064 					    struct dm_crtc_state *new_state)
9065 {
9066 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9067 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9068 
9069 	if (!old_vrr_active && new_vrr_active) {
9070 		/* Transition VRR inactive -> active:
9071 		 * While VRR is active, we must not disable vblank irq, as a
9072 		 * reenable after disable would compute bogus vblank/pflip
9073 		 * timestamps if it likely happened inside display front-porch.
9074 		 *
9075 		 * We also need vupdate irq for the actual core vblank handling
9076 		 * at end of vblank.
9077 		 */
9078 		dm_set_vupdate_irq(new_state->base.crtc, true);
9079 		drm_crtc_vblank_get(new_state->base.crtc);
9080 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9081 				 __func__, new_state->base.crtc->base.id);
9082 	} else if (old_vrr_active && !new_vrr_active) {
9083 		/* Transition VRR active -> inactive:
9084 		 * Allow vblank irq disable again for fixed refresh rate.
9085 		 */
9086 		dm_set_vupdate_irq(new_state->base.crtc, false);
9087 		drm_crtc_vblank_put(new_state->base.crtc);
9088 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9089 				 __func__, new_state->base.crtc->base.id);
9090 	}
9091 }
9092 
9093 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9094 {
9095 	struct drm_plane *plane;
9096 	struct drm_plane_state *old_plane_state;
9097 	int i;
9098 
9099 	/*
9100 	 * TODO: Make this per-stream so we don't issue redundant updates for
9101 	 * commits with multiple streams.
9102 	 */
9103 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9104 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9105 			handle_cursor_update(plane, old_plane_state);
9106 }
9107 
9108 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9109 				    struct dc_state *dc_state,
9110 				    struct drm_device *dev,
9111 				    struct amdgpu_display_manager *dm,
9112 				    struct drm_crtc *pcrtc,
9113 				    bool wait_for_vblank)
9114 {
9115 	uint32_t i;
9116 	uint64_t timestamp_ns;
9117 	struct drm_plane *plane;
9118 	struct drm_plane_state *old_plane_state, *new_plane_state;
9119 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9120 	struct drm_crtc_state *new_pcrtc_state =
9121 			drm_atomic_get_new_crtc_state(state, pcrtc);
9122 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9123 	struct dm_crtc_state *dm_old_crtc_state =
9124 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9125 	int planes_count = 0, vpos, hpos;
9126 	long r;
9127 	unsigned long flags;
9128 	struct amdgpu_bo *abo;
9129 	uint32_t target_vblank, last_flip_vblank;
9130 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9131 	bool pflip_present = false;
9132 	struct {
9133 		struct dc_surface_update surface_updates[MAX_SURFACES];
9134 		struct dc_plane_info plane_infos[MAX_SURFACES];
9135 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9136 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9137 		struct dc_stream_update stream_update;
9138 	} *bundle;
9139 
9140 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9141 
9142 	if (!bundle) {
9143 		dm_error("Failed to allocate update bundle\n");
9144 		goto cleanup;
9145 	}
9146 
9147 	/*
9148 	 * Disable the cursor first if we're disabling all the planes.
9149 	 * It'll remain on the screen after the planes are re-enabled
9150 	 * if we don't.
9151 	 */
9152 	if (acrtc_state->active_planes == 0)
9153 		amdgpu_dm_commit_cursors(state);
9154 
9155 	/* update planes when needed */
9156 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9157 		struct drm_crtc *crtc = new_plane_state->crtc;
9158 		struct drm_crtc_state *new_crtc_state;
9159 		struct drm_framebuffer *fb = new_plane_state->fb;
9160 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9161 		bool plane_needs_flip;
9162 		struct dc_plane_state *dc_plane;
9163 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9164 
9165 		/* Cursor plane is handled after stream updates */
9166 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9167 			continue;
9168 
9169 		if (!fb || !crtc || pcrtc != crtc)
9170 			continue;
9171 
9172 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9173 		if (!new_crtc_state->active)
9174 			continue;
9175 
9176 		dc_plane = dm_new_plane_state->dc_state;
9177 
9178 		bundle->surface_updates[planes_count].surface = dc_plane;
9179 		if (new_pcrtc_state->color_mgmt_changed) {
9180 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9181 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9182 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9183 		}
9184 
9185 		fill_dc_scaling_info(dm->adev, new_plane_state,
9186 				     &bundle->scaling_infos[planes_count]);
9187 
9188 		bundle->surface_updates[planes_count].scaling_info =
9189 			&bundle->scaling_infos[planes_count];
9190 
9191 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9192 
9193 		pflip_present = pflip_present || plane_needs_flip;
9194 
9195 		if (!plane_needs_flip) {
9196 			planes_count += 1;
9197 			continue;
9198 		}
9199 
9200 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9201 
9202 		/*
9203 		 * Wait for all fences on this FB. Do limited wait to avoid
9204 		 * deadlock during GPU reset when this fence will not signal
9205 		 * but we hold reservation lock for the BO.
9206 		 */
9207 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9208 					  msecs_to_jiffies(5000));
9209 		if (unlikely(r <= 0))
9210 			DRM_ERROR("Waiting for fences timed out!");
9211 
9212 		fill_dc_plane_info_and_addr(
9213 			dm->adev, new_plane_state,
9214 			afb->tiling_flags,
9215 			&bundle->plane_infos[planes_count],
9216 			&bundle->flip_addrs[planes_count].address,
9217 			afb->tmz_surface, false);
9218 
9219 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9220 				 new_plane_state->plane->index,
9221 				 bundle->plane_infos[planes_count].dcc.enable);
9222 
9223 		bundle->surface_updates[planes_count].plane_info =
9224 			&bundle->plane_infos[planes_count];
9225 
9226 		/*
9227 		 * Only allow immediate flips for fast updates that don't
9228 		 * change FB pitch, DCC state, rotation or mirroring.
9229 		 */
9230 		bundle->flip_addrs[planes_count].flip_immediate =
9231 			crtc->state->async_flip &&
9232 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9233 
9234 		timestamp_ns = ktime_get_ns();
9235 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9236 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9237 		bundle->surface_updates[planes_count].surface = dc_plane;
9238 
9239 		if (!bundle->surface_updates[planes_count].surface) {
9240 			DRM_ERROR("No surface for CRTC: id=%d\n",
9241 					acrtc_attach->crtc_id);
9242 			continue;
9243 		}
9244 
9245 		if (plane == pcrtc->primary)
9246 			update_freesync_state_on_stream(
9247 				dm,
9248 				acrtc_state,
9249 				acrtc_state->stream,
9250 				dc_plane,
9251 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9252 
9253 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9254 				 __func__,
9255 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9256 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9257 
9258 		planes_count += 1;
9259 
9260 	}
9261 
9262 	if (pflip_present) {
9263 		if (!vrr_active) {
9264 			/* Use old throttling in non-vrr fixed refresh rate mode
9265 			 * to keep flip scheduling based on target vblank counts
9266 			 * working in a backwards compatible way, e.g., for
9267 			 * clients using the GLX_OML_sync_control extension or
9268 			 * DRI3/Present extension with defined target_msc.
9269 			 */
9270 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9271 		} else {
9273 			/* For variable refresh rate mode only:
9274 			 * Get vblank of last completed flip to avoid > 1 vrr
9275 			 * flips per video frame by use of throttling, but allow
9276 			 * flip programming anywhere in the possibly large
9277 			 * variable vrr vblank interval for fine-grained flip
9278 			 * timing control and more opportunity to avoid stutter
9279 			 * on late submission of flips.
9280 			 */
9281 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9282 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9283 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9284 		}
9285 
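		/*
		 * wait_for_vblank contributes 0 or 1: target either the vblank
		 * of the last completed flip or the one right after it.
		 */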
9286 		target_vblank = last_flip_vblank + wait_for_vblank;
9287 
9288 		/*
9289 		 * Wait until we're out of the vertical blank period before the one
9290 		 * targeted by the flip
9291 		 */
9292 		while ((acrtc_attach->enabled &&
9293 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9294 							    0, &vpos, &hpos, NULL,
9295 							    NULL, &pcrtc->hwmode)
9296 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9297 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9298 			(int)(target_vblank -
9299 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9300 			usleep_range(1000, 1100);
9301 		}
9302 
9303 		/**
9304 		 * Prepare the flip event for the pageflip interrupt to handle.
9305 		 *
9306 		 * This only works in the case where we've already turned on the
9307 		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
9308 		 * from 0 -> n planes we have to skip a hardware generated event
9309 		 * and rely on sending it from software.
9310 		 */
9311 		if (acrtc_attach->base.state->event &&
9312 		    acrtc_state->active_planes > 0 &&
9313 		    !acrtc_state->force_dpms_off) {
9314 			drm_crtc_vblank_get(pcrtc);
9315 
9316 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9317 
9318 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9319 			prepare_flip_isr(acrtc_attach);
9320 
9321 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9322 		}
9323 
9324 		if (acrtc_state->stream) {
9325 			if (acrtc_state->freesync_vrr_info_changed)
9326 				bundle->stream_update.vrr_infopacket =
9327 					&acrtc_state->stream->vrr_infopacket;
9328 		}
9329 	}
9330 
9331 	/* Update the planes if changed or disable if we don't have any. */
9332 	if ((planes_count || acrtc_state->active_planes == 0) &&
9333 		acrtc_state->stream) {
9334 #if defined(CONFIG_DRM_AMD_DC_DCN)
9335 		/*
9336 		 * If PSR or idle optimizations are enabled then flush out
9337 		 * any pending work before hardware programming.
9338 		 */
9339 		if (dm->vblank_control_workqueue)
9340 			flush_workqueue(dm->vblank_control_workqueue);
9341 #endif
9342 
9343 		bundle->stream_update.stream = acrtc_state->stream;
9344 		if (new_pcrtc_state->mode_changed) {
9345 			bundle->stream_update.src = acrtc_state->stream->src;
9346 			bundle->stream_update.dst = acrtc_state->stream->dst;
9347 		}
9348 
9349 		if (new_pcrtc_state->color_mgmt_changed) {
9350 			/*
9351 			 * TODO: This isn't fully correct since we've actually
9352 			 * already modified the stream in place.
9353 			 */
9354 			bundle->stream_update.gamut_remap =
9355 				&acrtc_state->stream->gamut_remap_matrix;
9356 			bundle->stream_update.output_csc_transform =
9357 				&acrtc_state->stream->csc_color_matrix;
9358 			bundle->stream_update.out_transfer_func =
9359 				acrtc_state->stream->out_transfer_func;
9360 		}
9361 
9362 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9363 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9364 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9365 
9366 		/*
9367 		 * If FreeSync state on the stream has changed then we need to
9368 		 * re-adjust the min/max bounds now that DC doesn't handle this
9369 		 * as part of commit.
9370 		 */
9371 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9372 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9373 			dc_stream_adjust_vmin_vmax(
9374 				dm->dc, acrtc_state->stream,
9375 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9376 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9377 		}
9378 		mutex_lock(&dm->dc_lock);
9379 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9380 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9381 			amdgpu_dm_psr_disable(acrtc_state->stream);
9382 
9383 		dc_commit_updates_for_stream(dm->dc,
9384 						     bundle->surface_updates,
9385 						     planes_count,
9386 						     acrtc_state->stream,
9387 						     &bundle->stream_update,
9388 						     dc_state);
9389 
9390 		/**
9391 		 * Enable or disable the interrupts on the backend.
9392 		 *
9393 		 * Most pipes are put into power gating when unused.
9394 		 *
9395 		 * When power gating is enabled on a pipe we lose the
9396 		 * interrupt enablement state when power gating is disabled.
9397 		 *
9398 		 * So we need to update the IRQ control state in hardware
9399 		 * whenever the pipe turns on (since it could be previously
9400 		 * power gated) or off (since some pipes can't be power gated
9401 		 * on some ASICs).
9402 		 */
9403 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9404 			dm_update_pflip_irq_state(drm_to_adev(dev),
9405 						  acrtc_attach);
9406 
9407 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9408 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9409 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9410 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9411 
9412 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9413 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9414 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9415 			struct amdgpu_dm_connector *aconn =
9416 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9417 
9418 			if (aconn->psr_skip_count > 0)
9419 				aconn->psr_skip_count--;
9420 
9421 			/* Allow PSR when skip count is 0. */
9422 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9423 		} else {
9424 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9425 		}
9426 
9427 		mutex_unlock(&dm->dc_lock);
9428 	}
9429 
9430 	/*
9431 	 * Update cursor state *after* programming all the planes.
9432 	 * This avoids redundant programming in the case where we're going
9433 	 * to be disabling a single plane - those pipes are being disabled.
9434 	 */
9435 	if (acrtc_state->active_planes)
9436 		amdgpu_dm_commit_cursors(state);
9437 
9438 cleanup:
9439 	kfree(bundle);
9440 }
9441 
9442 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9443 				   struct drm_atomic_state *state)
9444 {
9445 	struct amdgpu_device *adev = drm_to_adev(dev);
9446 	struct amdgpu_dm_connector *aconnector;
9447 	struct drm_connector *connector;
9448 	struct drm_connector_state *old_con_state, *new_con_state;
9449 	struct drm_crtc_state *new_crtc_state;
9450 	struct dm_crtc_state *new_dm_crtc_state;
9451 	const struct dc_stream_status *status;
9452 	int i, inst;
9453 
9454 	/* Notify device removals. */
9455 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9456 		if (old_con_state->crtc != new_con_state->crtc) {
9457 			/* CRTC changes require notification. */
9458 			goto notify;
9459 		}
9460 
9461 		if (!new_con_state->crtc)
9462 			continue;
9463 
9464 		new_crtc_state = drm_atomic_get_new_crtc_state(
9465 			state, new_con_state->crtc);
9466 
9467 		if (!new_crtc_state)
9468 			continue;
9469 
9470 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9471 			continue;
9472 
9473 	notify:
9474 		aconnector = to_amdgpu_dm_connector(connector);
9475 
9476 		mutex_lock(&adev->dm.audio_lock);
9477 		inst = aconnector->audio_inst;
9478 		aconnector->audio_inst = -1;
9479 		mutex_unlock(&adev->dm.audio_lock);
9480 
9481 		amdgpu_dm_audio_eld_notify(adev, inst);
9482 	}
9483 
9484 	/* Notify audio device additions. */
9485 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9486 		if (!new_con_state->crtc)
9487 			continue;
9488 
9489 		new_crtc_state = drm_atomic_get_new_crtc_state(
9490 			state, new_con_state->crtc);
9491 
9492 		if (!new_crtc_state)
9493 			continue;
9494 
9495 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9496 			continue;
9497 
9498 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9499 		if (!new_dm_crtc_state->stream)
9500 			continue;
9501 
9502 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9503 		if (!status)
9504 			continue;
9505 
9506 		aconnector = to_amdgpu_dm_connector(connector);
9507 
9508 		mutex_lock(&adev->dm.audio_lock);
9509 		inst = status->audio_inst;
9510 		aconnector->audio_inst = inst;
9511 		mutex_unlock(&adev->dm.audio_lock);
9512 
9513 		amdgpu_dm_audio_eld_notify(adev, inst);
9514 	}
9515 }
9516 
9517 /*
9518  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9519  * @crtc_state: the DRM CRTC state
9520  * @stream_state: the DC stream state.
9521  *
9522  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9523  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9524  */
9525 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9526 						struct dc_stream_state *stream_state)
9527 {
9528 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9529 }
9530 
9531 /**
9532  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9533  * @state: The atomic state to commit
9534  *
9535  * This will tell DC to commit the constructed DC state from atomic_check,
9536  * programming the hardware. Any failures here implies a hardware failure, since
9537  * atomic check should have filtered anything non-kosher.
9538  */
9539 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9540 {
9541 	struct drm_device *dev = state->dev;
9542 	struct amdgpu_device *adev = drm_to_adev(dev);
9543 	struct amdgpu_display_manager *dm = &adev->dm;
9544 	struct dm_atomic_state *dm_state;
9545 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9546 	uint32_t i, j;
9547 	struct drm_crtc *crtc;
9548 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9549 	unsigned long flags;
9550 	bool wait_for_vblank = true;
9551 	struct drm_connector *connector;
9552 	struct drm_connector_state *old_con_state, *new_con_state;
9553 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9554 	int crtc_disable_count = 0;
9555 	bool mode_set_reset_required = false;
9556 
9557 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9558 
9559 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9560 
9561 	dm_state = dm_atomic_get_new_state(state);
9562 	if (dm_state && dm_state->context) {
9563 		dc_state = dm_state->context;
9564 	} else {
9565 		/* No state changes, retain current state. */
9566 		dc_state_temp = dc_create_state(dm->dc);
9567 		ASSERT(dc_state_temp);
9568 		dc_state = dc_state_temp;
9569 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9570 	}
9571 
9572 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9573 				       new_crtc_state, i) {
9574 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9575 
9576 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9577 
9578 		if (old_crtc_state->active &&
9579 		    (!new_crtc_state->active ||
9580 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9581 			manage_dm_interrupts(adev, acrtc, false);
9582 			dc_stream_release(dm_old_crtc_state->stream);
9583 		}
9584 	}
9585 
9586 	drm_atomic_helper_calc_timestamping_constants(state);
9587 
9588 	/* update changed items */
9589 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9590 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9591 
9592 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9593 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9594 
9595 		DRM_DEBUG_ATOMIC(
9596 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9597 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9598 			"connectors_changed:%d\n",
9599 			acrtc->crtc_id,
9600 			new_crtc_state->enable,
9601 			new_crtc_state->active,
9602 			new_crtc_state->planes_changed,
9603 			new_crtc_state->mode_changed,
9604 			new_crtc_state->active_changed,
9605 			new_crtc_state->connectors_changed);
9606 
9607 		/* Disable cursor if disabling crtc */
9608 		if (old_crtc_state->active && !new_crtc_state->active) {
9609 			struct dc_cursor_position position;
9610 
9611 			memset(&position, 0, sizeof(position));
9612 			mutex_lock(&dm->dc_lock);
9613 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9614 			mutex_unlock(&dm->dc_lock);
9615 		}
9616 
9617 		/* Copy all transient state flags into dc state */
9618 		if (dm_new_crtc_state->stream) {
9619 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9620 							    dm_new_crtc_state->stream);
9621 		}
9622 
9623 		/* handles headless hotplug case, updating new_state and
9624 		 * aconnector as needed
9625 		 */
9626 
9627 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9628 
9629 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9630 
9631 			if (!dm_new_crtc_state->stream) {
9632 				/*
9633 				 * This could happen because of issues with
9634 				 * userspace notification delivery.
9635 				 * In this case userspace tries to set a mode on
9636 				 * a display which is in fact disconnected.
9637 				 * dc_sink is NULL on the aconnector in this case.
9638 				 * We expect a reset mode to come soon.
9639 				 *
9640 				 * This can also happen when an unplug is done
9641 				 * during the resume sequence.
9642 				 *
9643 				 * In this case, we want to pretend we still
9644 				 * have a sink to keep the pipe running so that
9645 				 * hw state is consistent with the sw state
9646 				 */
9647 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9648 						__func__, acrtc->base.base.id);
9649 				continue;
9650 			}
9651 
9652 			if (dm_old_crtc_state->stream)
9653 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9654 
9655 			pm_runtime_get_noresume(dev->dev);
9656 
9657 			acrtc->enabled = true;
9658 			acrtc->hw_mode = new_crtc_state->mode;
9659 			crtc->hwmode = new_crtc_state->mode;
9660 			mode_set_reset_required = true;
9661 		} else if (modereset_required(new_crtc_state)) {
9662 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9663 			/* i.e. reset mode */
9664 			if (dm_old_crtc_state->stream)
9665 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9666 
9667 			mode_set_reset_required = true;
9668 		}
9669 	} /* for_each_crtc_in_state() */
9670 
9671 	if (dc_state) {
9672 		/* If there is a mode set or reset, disable eDP PSR. */
9673 		if (mode_set_reset_required) {
9674 #if defined(CONFIG_DRM_AMD_DC_DCN)
9675 			if (dm->vblank_control_workqueue)
9676 				flush_workqueue(dm->vblank_control_workqueue);
9677 #endif
9678 			amdgpu_dm_psr_disable_all(dm);
9679 		}
9680 
9681 		dm_enable_per_frame_crtc_master_sync(dc_state);
9682 		mutex_lock(&dm->dc_lock);
9683 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9684 #if defined(CONFIG_DRM_AMD_DC_DCN)
9685 		/* Allow idle optimizations when vblank count is 0 for display off */
9686 		if (dm->active_vblank_irq_count == 0)
9687 			dc_allow_idle_optimizations(dm->dc, true);
9688 #endif
9689 		mutex_unlock(&dm->dc_lock);
9690 	}
9691 
9692 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9693 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9694 
9695 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9696 
9697 		if (dm_new_crtc_state->stream != NULL) {
9698 			const struct dc_stream_status *status =
9699 					dc_stream_get_status(dm_new_crtc_state->stream);
9700 
9701 			if (!status)
9702 				status = dc_stream_get_status_from_state(dc_state,
9703 									 dm_new_crtc_state->stream);
9704 			if (!status)
9705 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9706 			else
9707 				acrtc->otg_inst = status->primary_otg_inst;
9708 		}
9709 	}
9710 #ifdef CONFIG_DRM_AMD_DC_HDCP
9711 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9712 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9713 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9714 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9715 
9716 		new_crtc_state = NULL;
9717 
9718 		if (acrtc)
9719 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9720 
9721 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9722 
9723 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9724 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9725 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9726 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9727 			dm_new_con_state->update_hdcp = true;
9728 			continue;
9729 		}
9730 
9731 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9732 			hdcp_update_display(
9733 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9734 				new_con_state->hdcp_content_type,
9735 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9736 	}
9737 #endif
9738 
9739 	/* Handle connector state changes */
9740 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9741 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9742 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9743 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9744 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9745 		struct dc_stream_update stream_update;
9746 		struct dc_info_packet hdr_packet;
9747 		struct dc_stream_status *status = NULL;
9748 		bool abm_changed, hdr_changed, scaling_changed;
9749 
9750 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9751 		memset(&stream_update, 0, sizeof(stream_update));
9752 
9753 		if (acrtc) {
9754 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9755 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9756 		}
9757 
9758 		/* Skip any modesets/resets */
9759 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9760 			continue;
9761 
9762 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9763 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9764 
9765 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9766 							     dm_old_con_state);
9767 
9768 		abm_changed = dm_new_crtc_state->abm_level !=
9769 			      dm_old_crtc_state->abm_level;
9770 
9771 		hdr_changed =
9772 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9773 
9774 		if (!scaling_changed && !abm_changed && !hdr_changed)
9775 			continue;
9776 
9777 		stream_update.stream = dm_new_crtc_state->stream;
9778 		if (scaling_changed) {
9779 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9780 					dm_new_con_state, dm_new_crtc_state->stream);
9781 
9782 			stream_update.src = dm_new_crtc_state->stream->src;
9783 			stream_update.dst = dm_new_crtc_state->stream->dst;
9784 		}
9785 
9786 		if (abm_changed) {
9787 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9788 
9789 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9790 		}
9791 
9792 		if (hdr_changed) {
9793 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9794 			stream_update.hdr_static_metadata = &hdr_packet;
9795 		}
9796 
9797 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9798 
9799 		if (WARN_ON(!status))
9800 			continue;
9801 
9802 		WARN_ON(!status->plane_count);
9803 
9804 		/*
9805 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9806 		 * Here we create an empty update on each plane.
9807 		 * To fix this, DC should permit updating only stream properties.
9808 		 */
9809 		for (j = 0; j < status->plane_count; j++)
9810 			dummy_updates[j].surface = status->plane_states[0];
9811 
9812 
9813 		mutex_lock(&dm->dc_lock);
9814 		dc_commit_updates_for_stream(dm->dc,
9815 						     dummy_updates,
9816 						     status->plane_count,
9817 						     dm_new_crtc_state->stream,
9818 						     &stream_update,
9819 						     dc_state);
9820 		mutex_unlock(&dm->dc_lock);
9821 	}
9822 
9823 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9824 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9825 				      new_crtc_state, i) {
9826 		if (old_crtc_state->active && !new_crtc_state->active)
9827 			crtc_disable_count++;
9828 
9829 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9830 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9831 
9832 		/* For freesync config update on crtc state and params for irq */
9833 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9834 
9835 		/* Handle vrr on->off / off->on transitions */
9836 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9837 						dm_new_crtc_state);
9838 	}
9839 
9840 	/**
9841 	 * Enable interrupts for CRTCs that are newly enabled or went through
9842 	 * a modeset. It was intentionally deferred until after the front end
9843 	 * state was modified to wait until the OTG was on and so the IRQ
9844 	 * handlers didn't access stale or invalid state.
9845 	 */
9846 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9847 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9848 #ifdef CONFIG_DEBUG_FS
9849 		bool configure_crc = false;
9850 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9851 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9852 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9853 #endif
9854 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9855 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9856 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9857 #endif
9858 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9859 
9860 		if (new_crtc_state->active &&
9861 		    (!old_crtc_state->active ||
9862 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9863 			dc_stream_retain(dm_new_crtc_state->stream);
9864 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9865 			manage_dm_interrupts(adev, acrtc, true);
9866 
9867 #ifdef CONFIG_DEBUG_FS
9868 			/**
9869 			 * Frontend may have changed so reapply the CRC capture
9870 			 * settings for the stream.
9871 			 */
9872 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9873 
9874 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9875 				configure_crc = true;
9876 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9877 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9878 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9879 					acrtc->dm_irq_params.crc_window.update_win = true;
9880 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9881 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9882 					crc_rd_wrk->crtc = crtc;
9883 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9884 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9885 				}
9886 #endif
9887 			}
9888 
9889 			if (configure_crc)
9890 				if (amdgpu_dm_crtc_configure_crc_source(
9891 					crtc, dm_new_crtc_state, cur_crc_src))
9892 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9893 #endif
9894 		}
9895 	}
9896 
9897 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9898 		if (new_crtc_state->async_flip)
9899 			wait_for_vblank = false;
9900 
9901 	/* update planes when needed per crtc*/
9902 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9903 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9904 
9905 		if (dm_new_crtc_state->stream)
9906 			amdgpu_dm_commit_planes(state, dc_state, dev,
9907 						dm, crtc, wait_for_vblank);
9908 	}
9909 
9910 	/* Update audio instances for each connector. */
9911 	amdgpu_dm_commit_audio(dev, state);
9912 
9913 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9914 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9915 	/* restore the backlight level */
9916 	for (i = 0; i < dm->num_of_edps; i++) {
9917 		if (dm->backlight_dev[i] &&
9918 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9919 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9920 	}
9921 #endif
9922 	/*
9923 	 * send vblank event on all events not handled in flip and
9924 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9925 	 */
9926 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9927 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9928 
9929 		if (new_crtc_state->event)
9930 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9931 
9932 		new_crtc_state->event = NULL;
9933 	}
9934 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9935 
9936 	/* Signal HW programming completion */
9937 	drm_atomic_helper_commit_hw_done(state);
9938 
9939 	if (wait_for_vblank)
9940 		drm_atomic_helper_wait_for_flip_done(dev, state);
9941 
9942 	drm_atomic_helper_cleanup_planes(dev, state);
9943 
9944 	/* return the stolen vga memory back to VRAM */
9945 	if (!adev->mman.keep_stolen_vga_memory)
9946 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9947 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9948 
9949 	/*
9950 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9951 	 * so we can put the GPU into runtime suspend if we're not driving any
9952 	 * displays anymore
9953 	 */
9954 	for (i = 0; i < crtc_disable_count; i++)
9955 		pm_runtime_put_autosuspend(dev->dev);
9956 	pm_runtime_mark_last_busy(dev->dev);
9957 
9958 	if (dc_state_temp)
9959 		dc_release_state(dc_state_temp);
9960 }
9961 
9962 
9963 static int dm_force_atomic_commit(struct drm_connector *connector)
9964 {
9965 	int ret = 0;
9966 	struct drm_device *ddev = connector->dev;
9967 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9968 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9969 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9970 	struct drm_connector_state *conn_state;
9971 	struct drm_crtc_state *crtc_state;
9972 	struct drm_plane_state *plane_state;
9973 
9974 	if (!state)
9975 		return -ENOMEM;
9976 
9977 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9978 
9979 	/* Construct an atomic state to restore previous display setting */
9980 
9981 	/*
9982 	 * Attach connectors to drm_atomic_state
9983 	 */
9984 	conn_state = drm_atomic_get_connector_state(state, connector);
9985 
9986 	ret = PTR_ERR_OR_ZERO(conn_state);
9987 	if (ret)
9988 		goto out;
9989 
9990 	/* Attach crtc to drm_atomic_state*/
9991 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9992 
9993 	ret = PTR_ERR_OR_ZERO(crtc_state);
9994 	if (ret)
9995 		goto out;
9996 
9997 	/* force a restore */
9998 	crtc_state->mode_changed = true;
9999 
10000 	/* Attach plane to drm_atomic_state */
10001 	plane_state = drm_atomic_get_plane_state(state, plane);
10002 
10003 	ret = PTR_ERR_OR_ZERO(plane_state);
10004 	if (ret)
10005 		goto out;
10006 
10007 	/* Call commit internally with the state we just constructed */
10008 	ret = drm_atomic_commit(state);
10009 
10010 out:
10011 	drm_atomic_state_put(state);
10012 	if (ret)
10013 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10014 
10015 	return ret;
10016 }
10017 
10018 /*
10019  * This function handles all cases where a set mode does not come upon hotplug.
10020  * This includes when a display is unplugged then plugged back into the
10021  * same port and when running without usermode desktop manager support.
10022  */
10023 void dm_restore_drm_connector_state(struct drm_device *dev,
10024 				    struct drm_connector *connector)
10025 {
10026 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10027 	struct amdgpu_crtc *disconnected_acrtc;
10028 	struct dm_crtc_state *acrtc_state;
10029 
10030 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10031 		return;
10032 
10033 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10034 	if (!disconnected_acrtc)
10035 		return;
10036 
10037 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10038 	if (!acrtc_state->stream)
10039 		return;
10040 
10041 	/*
10042 	 * If the previous sink is not released and different from the current,
10043 	 * we deduce we are in a state where we cannot rely on a usermode call
10044 	 * to turn on the display, so we do it here.
10045 	 */
10046 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10047 		dm_force_atomic_commit(&aconnector->base);
10048 }
10049 
10050 /*
10051  * Grabs all modesetting locks to serialize against any blocking commits, and
10052  * waits for completion of all non-blocking commits.
10053  */
10054 static int do_aquire_global_lock(struct drm_device *dev,
10055 				 struct drm_atomic_state *state)
10056 {
10057 	struct drm_crtc *crtc;
10058 	struct drm_crtc_commit *commit;
10059 	long ret;
10060 
10061 	/*
10062 	 * Adding all modeset locks to acquire_ctx will
10063 	 * ensure that when the framework releases it, the
10064 	 * extra locks we are taking here will get released too.
10065 	 */
10066 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10067 	if (ret)
10068 		return ret;
10069 
10070 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10071 		spin_lock(&crtc->commit_lock);
10072 		commit = list_first_entry_or_null(&crtc->commit_list,
10073 				struct drm_crtc_commit, commit_entry);
10074 		if (commit)
10075 			drm_crtc_commit_get(commit);
10076 		spin_unlock(&crtc->commit_lock);
10077 
10078 		if (!commit)
10079 			continue;
10080 
10081 		/*
10082 		 * Make sure all pending HW programming completed and
10083 		 * page flips done
10084 		 */
10085 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10086 
10087 		if (ret > 0)
10088 			ret = wait_for_completion_interruptible_timeout(
10089 					&commit->flip_done, 10*HZ);
10090 
10091 		if (ret == 0)
10092 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10093 				  "timed out\n", crtc->base.id, crtc->name);
10094 
10095 		drm_crtc_commit_put(commit);
10096 	}
10097 
10098 	return ret < 0 ? ret : 0;
10099 }
10100 
10101 static void get_freesync_config_for_crtc(
10102 	struct dm_crtc_state *new_crtc_state,
10103 	struct dm_connector_state *new_con_state)
10104 {
10105 	struct mod_freesync_config config = {0};
10106 	struct amdgpu_dm_connector *aconnector =
10107 			to_amdgpu_dm_connector(new_con_state->base.connector);
10108 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10109 	int vrefresh = drm_mode_vrefresh(mode);
10110 	bool fs_vid_mode = false;
10111 
10112 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10113 					vrefresh >= aconnector->min_vfreq &&
10114 					vrefresh <= aconnector->max_vfreq;
10115 
10116 	if (new_crtc_state->vrr_supported) {
10117 		new_crtc_state->stream->ignore_msa_timing_param = true;
10118 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10119 
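		/*
		 * aconnector->min/max_vfreq are in Hz while mod_freesync takes
		 * micro-Hz, hence the * 1000000 below (e.g. a 48 Hz lower
		 * bound becomes 48000000 uHz).
		 */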
10120 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10121 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10122 		config.vsif_supported = true;
10123 		config.btr = true;
10124 
10125 		if (fs_vid_mode) {
10126 			config.state = VRR_STATE_ACTIVE_FIXED;
10127 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10128 			goto out;
10129 		} else if (new_crtc_state->base.vrr_enabled) {
10130 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10131 		} else {
10132 			config.state = VRR_STATE_INACTIVE;
10133 		}
10134 	}
10135 out:
10136 	new_crtc_state->freesync_config = config;
10137 }
10138 
10139 static void reset_freesync_config_for_crtc(
10140 	struct dm_crtc_state *new_crtc_state)
10141 {
10142 	new_crtc_state->vrr_supported = false;
10143 
10144 	memset(&new_crtc_state->vrr_infopacket, 0,
10145 	       sizeof(new_crtc_state->vrr_infopacket));
10146 }
10147 
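/*
 * Returns true when the old and new modes differ only in their vertical
 * blanking (vtotal, vsync_start and vsync_end change while the sync pulse
 * width and all horizontal timings stay the same). Freesync video modes vary
 * only the vertical front porch, so such a change does not need a full
 * modeset.
 */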
10148 static bool
10149 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10150 				 struct drm_crtc_state *new_crtc_state)
10151 {
10152 	struct drm_display_mode old_mode, new_mode;
10153 
10154 	if (!old_crtc_state || !new_crtc_state)
10155 		return false;
10156 
10157 	old_mode = old_crtc_state->mode;
10158 	new_mode = new_crtc_state->mode;
10159 
10160 	if (old_mode.clock       == new_mode.clock &&
10161 	    old_mode.hdisplay    == new_mode.hdisplay &&
10162 	    old_mode.vdisplay    == new_mode.vdisplay &&
10163 	    old_mode.htotal      == new_mode.htotal &&
10164 	    old_mode.vtotal      != new_mode.vtotal &&
10165 	    old_mode.hsync_start == new_mode.hsync_start &&
10166 	    old_mode.vsync_start != new_mode.vsync_start &&
10167 	    old_mode.hsync_end   == new_mode.hsync_end &&
10168 	    old_mode.vsync_end   != new_mode.vsync_end &&
10169 	    old_mode.hskew       == new_mode.hskew &&
10170 	    old_mode.vscan       == new_mode.vscan &&
10171 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10172 	    (new_mode.vsync_end - new_mode.vsync_start))
10173 		return true;
10174 
10175 	return false;
10176 }
10177 
10178 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10179 	uint64_t num, den, res;
10180 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10181 
10182 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10183 
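	/*
	 * mode.clock is in kHz, so the fixed refresh rate in micro-Hz is
	 * clock * 1000 * 1000000 / (htotal * vtotal); e.g. a hypothetical
	 * 148500 kHz mode with htotal 2200 and vtotal 1125 works out to
	 * 60000000 uHz (60 Hz).
	 */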
10184 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10185 	den = (unsigned long long)new_crtc_state->mode.htotal *
10186 	      (unsigned long long)new_crtc_state->mode.vtotal;
10187 
10188 	res = div_u64(num, den);
10189 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10190 }
10191 
10192 int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10193 			 struct drm_atomic_state *state,
10194 			 struct drm_crtc *crtc,
10195 			 struct drm_crtc_state *old_crtc_state,
10196 			 struct drm_crtc_state *new_crtc_state,
10197 			 bool enable,
10198 			 bool *lock_and_validation_needed)
10199 {
10200 	struct dm_atomic_state *dm_state = NULL;
10201 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10202 	struct dc_stream_state *new_stream;
10203 	int ret = 0;
10204 
10205 	/*
10206 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10207 	 * update changed items
10208 	 */
10209 	struct amdgpu_crtc *acrtc = NULL;
10210 	struct amdgpu_dm_connector *aconnector = NULL;
10211 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10212 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10213 
10214 	new_stream = NULL;
10215 
10216 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10217 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10218 	acrtc = to_amdgpu_crtc(crtc);
10219 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10220 
10221 	/* TODO This hack should go away */
10222 	if (aconnector && enable) {
10223 		/* Make sure fake sink is created in plug-in scenario */
10224 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10225 							    &aconnector->base);
10226 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10227 							    &aconnector->base);
10228 
10229 		if (IS_ERR(drm_new_conn_state)) {
10230 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10231 			goto fail;
10232 		}
10233 
10234 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10235 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10236 
10237 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10238 			goto skip_modeset;
10239 
10240 		new_stream = create_validate_stream_for_sink(aconnector,
10241 							     &new_crtc_state->mode,
10242 							     dm_new_conn_state,
10243 							     dm_old_crtc_state->stream);
10244 
10245 		/*
10246 		 * We can have no stream on ACTION_SET if a display
10247 		 * was disconnected during S3; in this case it is not an
10248 		 * error, the OS will be updated after detection and
10249 		 * will do the right thing on the next atomic commit.
10250 		 */
10251 
10252 		if (!new_stream) {
10253 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10254 					__func__, acrtc->base.base.id);
10255 			ret = -ENOMEM;
10256 			goto fail;
10257 		}
10258 
10259 		/*
10260 		 * TODO: Check VSDB bits to decide whether this should
10261 		 * be enabled or not.
10262 		 */
10263 		new_stream->triggered_crtc_reset.enabled =
10264 			dm->force_timing_sync;
10265 
10266 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10267 
10268 		ret = fill_hdr_info_packet(drm_new_conn_state,
10269 					   &new_stream->hdr_static_metadata);
10270 		if (ret)
10271 			goto fail;
10272 
10273 		/*
10274 		 * If we already removed the old stream from the context
10275 		 * (and set the new stream to NULL) then we can't reuse
10276 		 * the old stream even if the stream and scaling are unchanged.
10277 		 * We'll hit the BUG_ON and black screen.
10278 		 *
10279 		 * TODO: Refactor this function to allow this check to work
10280 		 * in all conditions.
10281 		 */
10282 		if (dm_new_crtc_state->stream &&
10283 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10284 			goto skip_modeset;
10285 
10286 		if (dm_new_crtc_state->stream &&
10287 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10288 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10289 			new_crtc_state->mode_changed = false;
10290 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10291 					 new_crtc_state->mode_changed);
10292 		}
10293 	}
10294 
10295 	/* mode_changed flag may get updated above, need to check again */
10296 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10297 		goto skip_modeset;
10298 
10299 	DRM_DEBUG_ATOMIC(
10300 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10301 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10302 		"connectors_changed:%d\n",
10303 		acrtc->crtc_id,
10304 		new_crtc_state->enable,
10305 		new_crtc_state->active,
10306 		new_crtc_state->planes_changed,
10307 		new_crtc_state->mode_changed,
10308 		new_crtc_state->active_changed,
10309 		new_crtc_state->connectors_changed);
10310 
10311 	/* Remove stream for any changed/disabled CRTC */
10312 	if (!enable) {
10313 
10314 		if (!dm_old_crtc_state->stream)
10315 			goto skip_modeset;
10316 
10317 		if (dm_new_crtc_state->stream &&
10318 		    is_timing_unchanged_for_freesync(new_crtc_state,
10319 						     old_crtc_state)) {
10320 			new_crtc_state->mode_changed = false;
10321 			DRM_DEBUG_DRIVER(
10322 				"Mode change not required for front porch change, "
10323 				"setting mode_changed to %d",
10324 				new_crtc_state->mode_changed);
10325 
10326 			set_freesync_fixed_config(dm_new_crtc_state);
10327 
10328 			goto skip_modeset;
10329 		} else if (aconnector &&
10330 			   is_freesync_video_mode(&new_crtc_state->mode,
10331 						  aconnector)) {
10332 			struct drm_display_mode *high_mode;
10333 
10334 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10335 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10336 				set_freesync_fixed_config(dm_new_crtc_state);
10337 			}
10338 		}
10339 
10340 		ret = dm_atomic_get_state(state, &dm_state);
10341 		if (ret)
10342 			goto fail;
10343 
10344 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10345 				crtc->base.id);
10346 
10347 		/* i.e. reset mode */
10348 		if (dc_remove_stream_from_ctx(
10349 				dm->dc,
10350 				dm_state->context,
10351 				dm_old_crtc_state->stream) != DC_OK) {
10352 			ret = -EINVAL;
10353 			goto fail;
10354 		}
10355 
10356 		dc_stream_release(dm_old_crtc_state->stream);
10357 		dm_new_crtc_state->stream = NULL;
10358 
10359 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10360 
10361 		*lock_and_validation_needed = true;
10362 
10363 	} else { /* Add stream for any updated/enabled CRTC */
10364 		/*
10365 		 * Quick fix to prevent a NULL pointer on new_stream when newly
10366 		 * added MST connectors are not found in the existing crtc_state in chained mode.
10367 		 * TODO: need to dig out the root cause of that.
10368 		 */
10369 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10370 			goto skip_modeset;
10371 
10372 		if (modereset_required(new_crtc_state))
10373 			goto skip_modeset;
10374 
10375 		if (modeset_required(new_crtc_state, new_stream,
10376 				     dm_old_crtc_state->stream)) {
10377 
10378 			WARN_ON(dm_new_crtc_state->stream);
10379 
10380 			ret = dm_atomic_get_state(state, &dm_state);
10381 			if (ret)
10382 				goto fail;
10383 
10384 			dm_new_crtc_state->stream = new_stream;
10385 
10386 			dc_stream_retain(new_stream);
10387 
10388 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10389 					 crtc->base.id);
10390 
10391 			if (dc_add_stream_to_ctx(
10392 					dm->dc,
10393 					dm_state->context,
10394 					dm_new_crtc_state->stream) != DC_OK) {
10395 				ret = -EINVAL;
10396 				goto fail;
10397 			}
10398 
10399 			*lock_and_validation_needed = true;
10400 		}
10401 	}
10402 
10403 skip_modeset:
10404 	/* Release extra reference */
10405 	if (new_stream)
10406 		dc_stream_release(new_stream);
10407 
10408 	/*
10409 	 * We want to do dc stream updates that do not require a
10410 	 * full modeset below.
10411 	 */
10412 	if (!(enable && aconnector && new_crtc_state->active))
10413 		return 0;
10414 	/*
10415 	 * Given above conditions, the dc state cannot be NULL because:
10416 	 * 1. We're in the process of enabling CRTCs (just been added
10417 	 *    to the dc context, or already is on the context)
10418 	 * 2. Has a valid connector attached, and
10419 	 * 3. Is currently active and enabled.
10420 	 * => The dc stream state currently exists.
10421 	 */
10422 	BUG_ON(dm_new_crtc_state->stream == NULL);
10423 
10424 	/* Scaling or underscan settings */
10425 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10426 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10427 		update_stream_scaling_settings(
10428 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10429 
10430 	/* ABM settings */
10431 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10432 
10433 	/*
10434 	 * Color management settings. We also update color properties
10435 	 * when a modeset is needed, to ensure it gets reprogrammed.
10436 	 */
10437 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10438 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10439 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10440 		if (ret)
10441 			goto fail;
10442 	}
10443 
10444 	/* Update Freesync settings. */
10445 	get_freesync_config_for_crtc(dm_new_crtc_state,
10446 				     dm_new_conn_state);
10447 
10448 	return ret;
10449 
10450 fail:
10451 	if (new_stream)
10452 		dc_stream_release(new_stream);
10453 	return ret;
10454 }
10455 
10456 static bool should_reset_plane(struct drm_atomic_state *state,
10457 			       struct drm_plane *plane,
10458 			       struct drm_plane_state *old_plane_state,
10459 			       struct drm_plane_state *new_plane_state)
10460 {
10461 	struct drm_plane *other;
10462 	struct drm_plane_state *old_other_state, *new_other_state;
10463 	struct drm_crtc_state *new_crtc_state;
10464 	int i;
10465 
10466 	/*
10467 	 * TODO: Remove this hack once the checks below are sufficient
10468 	 * to determine when we need to reset all the planes on
10469 	 * the stream.
10470 	 */
10471 	if (state->allow_modeset)
10472 		return true;
10473 
10474 	/* Exit early if we know that we're adding or removing the plane. */
10475 	if (old_plane_state->crtc != new_plane_state->crtc)
10476 		return true;
10477 
10478 	/* old crtc == new_crtc == NULL, plane not in context. */
10479 	if (!new_plane_state->crtc)
10480 		return false;
10481 
10482 	new_crtc_state =
10483 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10484 
10485 	if (!new_crtc_state)
10486 		return true;
10487 
10488 	/* CRTC Degamma changes currently require us to recreate planes. */
10489 	if (new_crtc_state->color_mgmt_changed)
10490 		return true;
10491 
10492 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10493 		return true;
10494 
10495 	/*
10496 	 * If there are any new primary or overlay planes being added or
10497 	 * removed then the z-order can potentially change. To ensure
10498 	 * correct z-order and pipe acquisition the current DC architecture
10499 	 * requires us to remove and recreate all existing planes.
10500 	 *
10501 	 * TODO: Come up with a more elegant solution for this.
10502 	 */
10503 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10504 		struct amdgpu_framebuffer *old_afb, *new_afb;
10505 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10506 			continue;
10507 
10508 		if (old_other_state->crtc != new_plane_state->crtc &&
10509 		    new_other_state->crtc != new_plane_state->crtc)
10510 			continue;
10511 
10512 		if (old_other_state->crtc != new_other_state->crtc)
10513 			return true;
10514 
10515 		/* Src/dst size and scaling updates. */
10516 		if (old_other_state->src_w != new_other_state->src_w ||
10517 		    old_other_state->src_h != new_other_state->src_h ||
10518 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10519 		    old_other_state->crtc_h != new_other_state->crtc_h)
10520 			return true;
10521 
10522 		/* Rotation / mirroring updates. */
10523 		if (old_other_state->rotation != new_other_state->rotation)
10524 			return true;
10525 
10526 		/* Blending updates. */
10527 		if (old_other_state->pixel_blend_mode !=
10528 		    new_other_state->pixel_blend_mode)
10529 			return true;
10530 
10531 		/* Alpha updates. */
10532 		if (old_other_state->alpha != new_other_state->alpha)
10533 			return true;
10534 
10535 		/* Colorspace changes. */
10536 		if (old_other_state->color_range != new_other_state->color_range ||
10537 		    old_other_state->color_encoding != new_other_state->color_encoding)
10538 			return true;
10539 
10540 		/* Framebuffer checks fall at the end. */
10541 		if (!old_other_state->fb || !new_other_state->fb)
10542 			continue;
10543 
10544 		/* Pixel format changes can require bandwidth updates. */
10545 		if (old_other_state->fb->format != new_other_state->fb->format)
10546 			return true;
10547 
10548 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10549 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10550 
10551 		/* Tiling and DCC changes also require bandwidth updates. */
10552 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10553 		    old_afb->base.modifier != new_afb->base.modifier)
10554 			return true;
10555 	}
10556 
10557 	return false;
10558 }
10559 
10560 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10561 			      struct drm_plane_state *new_plane_state,
10562 			      struct drm_framebuffer *fb)
10563 {
10564 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10565 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10566 	unsigned int pitch;
10567 	bool linear;
10568 
10569 	if (fb->width > new_acrtc->max_cursor_width ||
10570 	    fb->height > new_acrtc->max_cursor_height) {
10571 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10572 				 new_plane_state->fb->width,
10573 				 new_plane_state->fb->height);
10574 		return -EINVAL;
10575 	}
10576 	if (new_plane_state->src_w != fb->width << 16 ||
10577 	    new_plane_state->src_h != fb->height << 16) {
10578 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10579 		return -EINVAL;
10580 	}
10581 
10582 	/* Pitch in pixels */
10583 	pitch = fb->pitches[0] / fb->format->cpp[0];
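	/*
	 * e.g. a hypothetical ARGB8888 FB (4 bytes per pixel) with a
	 * 256-byte stride has a pitch of 64 pixels here.
	 */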
10584 
10585 	if (fb->width != pitch) {
10586 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10587 				 fb->width, pitch);
10588 		return -EINVAL;
10589 	}
10590 
10591 	switch (pitch) {
10592 	case 64:
10593 	case 128:
10594 	case 256:
10595 		/* FB pitch is supported by cursor plane */
10596 		break;
10597 	default:
10598 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10599 		return -EINVAL;
10600 	}
10601 
10602 	/* Core DRM takes care of checking FB modifiers, so we only need to
10603 	 * check tiling flags when the FB doesn't have a modifier. */
10604 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10605 		if (adev->family < AMDGPU_FAMILY_AI) {
10606 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10607 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10608 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10609 		} else {
10610 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10611 		}
10612 		if (!linear) {
10613 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10614 			return -EINVAL;
10615 		}
10616 	}
10617 
10618 	return 0;
10619 }
10620 
10621 static int dm_update_plane_state(struct dc *dc,
10622 				 struct drm_atomic_state *state,
10623 				 struct drm_plane *plane,
10624 				 struct drm_plane_state *old_plane_state,
10625 				 struct drm_plane_state *new_plane_state,
10626 				 bool enable,
10627 				 bool *lock_and_validation_needed)
10628 {
10629 
10630 	struct dm_atomic_state *dm_state = NULL;
10631 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10632 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10633 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10634 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10635 	struct amdgpu_crtc *new_acrtc;
10636 	bool needs_reset;
10637 	int ret = 0;
10638 
10639 
10640 	new_plane_crtc = new_plane_state->crtc;
10641 	old_plane_crtc = old_plane_state->crtc;
10642 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10643 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10644 
10645 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10646 		if (!enable || !new_plane_crtc ||
10647 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10648 			return 0;
10649 
10650 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10651 
10652 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10653 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10654 			return -EINVAL;
10655 		}
10656 
10657 		if (new_plane_state->fb) {
10658 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10659 						 new_plane_state->fb);
10660 			if (ret)
10661 				return ret;
10662 		}
10663 
10664 		return 0;
10665 	}
10666 
10667 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10668 					 new_plane_state);
10669 
10670 	/* Remove any changed/removed planes */
10671 	if (!enable) {
10672 		if (!needs_reset)
10673 			return 0;
10674 
10675 		if (!old_plane_crtc)
10676 			return 0;
10677 
10678 		old_crtc_state = drm_atomic_get_old_crtc_state(
10679 				state, old_plane_crtc);
10680 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10681 
10682 		if (!dm_old_crtc_state->stream)
10683 			return 0;
10684 
10685 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10686 				plane->base.id, old_plane_crtc->base.id);
10687 
10688 		ret = dm_atomic_get_state(state, &dm_state);
10689 		if (ret)
10690 			return ret;
10691 
10692 		if (!dc_remove_plane_from_context(
10693 				dc,
10694 				dm_old_crtc_state->stream,
10695 				dm_old_plane_state->dc_state,
10696 				dm_state->context)) {
10697 
10698 			return -EINVAL;
10699 		}
10700 
10701 
10702 		dc_plane_state_release(dm_old_plane_state->dc_state);
10703 		dm_new_plane_state->dc_state = NULL;
10704 
10705 		*lock_and_validation_needed = true;
10706 
10707 	} else { /* Add new planes */
10708 		struct dc_plane_state *dc_new_plane_state;
10709 
10710 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10711 			return 0;
10712 
10713 		if (!new_plane_crtc)
10714 			return 0;
10715 
10716 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10717 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10718 
10719 		if (!dm_new_crtc_state->stream)
10720 			return 0;
10721 
10722 		if (!needs_reset)
10723 			return 0;
10724 
10725 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10726 		if (ret)
10727 			return ret;
10728 
10729 		WARN_ON(dm_new_plane_state->dc_state);
10730 
10731 		dc_new_plane_state = dc_create_plane_state(dc);
10732 		if (!dc_new_plane_state)
10733 			return -ENOMEM;
10734 
10735 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10736 				 plane->base.id, new_plane_crtc->base.id);
10737 
10738 		ret = fill_dc_plane_attributes(
10739 			drm_to_adev(new_plane_crtc->dev),
10740 			dc_new_plane_state,
10741 			new_plane_state,
10742 			new_crtc_state);
10743 		if (ret) {
10744 			dc_plane_state_release(dc_new_plane_state);
10745 			return ret;
10746 		}
10747 
10748 		ret = dm_atomic_get_state(state, &dm_state);
10749 		if (ret) {
10750 			dc_plane_state_release(dc_new_plane_state);
10751 			return ret;
10752 		}
10753 
10754 		/*
10755 		 * Any atomic check errors that occur after this will
10756 		 * not need a release. The plane state will be attached
10757 		 * to the stream, and therefore part of the atomic
10758 		 * state. It'll be released when the atomic state is
10759 		 * cleaned.
10760 		 */
10761 		if (!dc_add_plane_to_context(
10762 				dc,
10763 				dm_new_crtc_state->stream,
10764 				dc_new_plane_state,
10765 				dm_state->context)) {
10766 
10767 			dc_plane_state_release(dc_new_plane_state);
10768 			return -EINVAL;
10769 		}
10770 
10771 		dm_new_plane_state->dc_state = dc_new_plane_state;
10772 
10773 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10774 
10775 		/* Tell DC to do a full surface update every time there
10776 		 * is a plane change. Inefficient, but works for now.
10777 		 */
10778 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10779 
10780 		*lock_and_validation_needed = true;
10781 	}
10782 
10783 
10784 	return ret;
10785 }
10786 
10787 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10788 				       int *src_w, int *src_h)
10789 {
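	/*
	 * src_w/src_h are 16.16 fixed point, so shift by 16 to get whole
	 * pixels; for 90/270 degree rotation, width and height are swapped
	 * to reflect the scanout orientation.
	 */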
10790 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10791 	case DRM_MODE_ROTATE_90:
10792 	case DRM_MODE_ROTATE_270:
10793 		*src_w = plane_state->src_h >> 16;
10794 		*src_h = plane_state->src_w >> 16;
10795 		break;
10796 	case DRM_MODE_ROTATE_0:
10797 	case DRM_MODE_ROTATE_180:
10798 	default:
10799 		*src_w = plane_state->src_w >> 16;
10800 		*src_h = plane_state->src_h >> 16;
10801 		break;
10802 	}
10803 }
10804 
10805 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10806 				struct drm_crtc *crtc,
10807 				struct drm_crtc_state *new_crtc_state)
10808 {
10809 	struct drm_plane *cursor = crtc->cursor, *underlying;
10810 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10811 	int i;
10812 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10813 	int cursor_src_w, cursor_src_h;
10814 	int underlying_src_w, underlying_src_h;
10815 
10816 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10817 	 * cursor per pipe but it's going to inherit the scaling and
10818 	 * positioning from the underlying pipe. Check that the cursor plane's
10819 	 * scaling matches the underlying planes'. */
10820 
10821 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10822 	if (!new_cursor_state || !new_cursor_state->fb) {
10823 		return 0;
10824 	}
10825 
10826 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10827 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10828 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
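	/*
	 * Scale factors are integer ratios in units of 1/1000; e.g. a
	 * hypothetical 64x64 cursor FB shown at 64x64 on screen gives a
	 * scale of 1000 (1.0x) in both dimensions.
	 */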
10829 
10830 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10831 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10832 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10833 			continue;
10834 
10835 		/* Ignore disabled planes */
10836 		if (!new_underlying_state->fb)
10837 			continue;
10838 
10839 		dm_get_oriented_plane_size(new_underlying_state,
10840 					   &underlying_src_w, &underlying_src_h);
10841 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10842 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10843 
10844 		if (cursor_scale_w != underlying_scale_w ||
10845 		    cursor_scale_h != underlying_scale_h) {
10846 			drm_dbg_atomic(crtc->dev,
10847 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10848 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10849 			return -EINVAL;
10850 		}
10851 
10852 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10853 		if (new_underlying_state->crtc_x <= 0 &&
10854 		    new_underlying_state->crtc_y <= 0 &&
10855 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10856 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10857 			break;
10858 	}
10859 
10860 	return 0;
10861 }
10862 
10863 #if defined(CONFIG_DRM_AMD_DC_DCN)
10864 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10865 {
10866 	struct drm_connector *connector;
10867 	struct drm_connector_state *conn_state, *old_conn_state;
10868 	struct amdgpu_dm_connector *aconnector = NULL;
10869 	int i;
10870 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10871 		if (!conn_state->crtc)
10872 			conn_state = old_conn_state;
10873 
10874 		if (conn_state->crtc != crtc)
10875 			continue;
10876 
10877 		aconnector = to_amdgpu_dm_connector(connector);
10878 		if (!aconnector->port || !aconnector->mst_port)
10879 			aconnector = NULL;
10880 		else
10881 			break;
10882 	}
10883 
10884 	if (!aconnector)
10885 		return 0;
10886 
10887 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10888 }
10889 #endif
10890 
10891 /**
10892  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10893  * @dev: The DRM device
10894  * @state: The atomic state to commit
10895  *
10896  * Validate that the given atomic state is programmable by DC into hardware.
10897  * This involves constructing a &struct dc_state reflecting the new hardware
10898  * state we wish to commit, then querying DC to see if it is programmable. It's
10899  * important not to modify the existing DC state. Otherwise, atomic_check
10900  * may unexpectedly commit hardware changes.
10901  *
10902  * When validating the DC state, it's important that the right locks are
10903  * acquired. For the full update case, which removes/adds/updates streams on one
10904  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10905  * that any such full update commit will wait for completion of any outstanding
10906  * flip using DRM's synchronization events.
10907  *
10908  * Note that DM adds the affected connectors for all CRTCs in state, when that
10909  * might not seem necessary. This is because DC stream creation requires the
10910  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10911  * be possible but non-trivial - a possible TODO item.
10912  *
10913  * Return: 0 on success, or a negative error code if validation failed.
10914  */
10915 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10916 				  struct drm_atomic_state *state)
10917 {
10918 	struct amdgpu_device *adev = drm_to_adev(dev);
10919 	struct dm_atomic_state *dm_state = NULL;
10920 	struct dc *dc = adev->dm.dc;
10921 	struct drm_connector *connector;
10922 	struct drm_connector_state *old_con_state, *new_con_state;
10923 	struct drm_crtc *crtc;
10924 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10925 	struct drm_plane *plane;
10926 	struct drm_plane_state *old_plane_state, *new_plane_state;
10927 	enum dc_status status;
10928 	int ret, i;
10929 	bool lock_and_validation_needed = false;
10930 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10931 #if defined(CONFIG_DRM_AMD_DC_DCN)
10932 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10933 	struct drm_dp_mst_topology_state *mst_state;
10934 	struct drm_dp_mst_topology_mgr *mgr;
10935 #endif
10936 
10937 	trace_amdgpu_dm_atomic_check_begin(state);
10938 
10939 	ret = drm_atomic_helper_check_modeset(dev, state);
10940 	if (ret) {
10941 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10942 		goto fail;
10943 	}
10944 
10945 	/* Check connector changes */
10946 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10947 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10948 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10949 
10950 		/* Skip connectors that are disabled or part of modeset already. */
10951 		if (!old_con_state->crtc && !new_con_state->crtc)
10952 			continue;
10953 
10954 		if (!new_con_state->crtc)
10955 			continue;
10956 
10957 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10958 		if (IS_ERR(new_crtc_state)) {
10959 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10960 			ret = PTR_ERR(new_crtc_state);
10961 			goto fail;
10962 		}
10963 
10964 		if (dm_old_con_state->abm_level !=
10965 		    dm_new_con_state->abm_level)
10966 			new_crtc_state->connectors_changed = true;
10967 	}
10968 
10969 #if defined(CONFIG_DRM_AMD_DC_DCN)
10970 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10971 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10972 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10973 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10974 				if (ret) {
10975 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10976 					goto fail;
10977 				}
10978 			}
10979 		}
10980 		pre_validate_dsc(state, &dm_state, vars);
10981 	}
10982 #endif
10983 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10984 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10985 
10986 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10987 		    !new_crtc_state->color_mgmt_changed &&
10988 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10989 			dm_old_crtc_state->dsc_force_changed == false)
10990 			continue;
10991 
10992 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10993 		if (ret) {
10994 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10995 			goto fail;
10996 		}
10997 
10998 		if (!new_crtc_state->enable)
10999 			continue;
11000 
11001 		ret = drm_atomic_add_affected_connectors(state, crtc);
11002 		if (ret) {
11003 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11004 			goto fail;
11005 		}
11006 
11007 		ret = drm_atomic_add_affected_planes(state, crtc);
11008 		if (ret) {
11009 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11010 			goto fail;
11011 		}
11012 
11013 		if (dm_old_crtc_state->dsc_force_changed)
11014 			new_crtc_state->mode_changed = true;
11015 	}
11016 
11017 	/*
11018 	 * Add all primary and overlay planes on the CRTC to the state
11019 	 * whenever a plane is enabled to maintain correct z-ordering
11020 	 * and to enable fast surface updates.
11021 	 */
11022 	drm_for_each_crtc(crtc, dev) {
11023 		bool modified = false;
11024 
11025 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11026 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11027 				continue;
11028 
11029 			if (new_plane_state->crtc == crtc ||
11030 			    old_plane_state->crtc == crtc) {
11031 				modified = true;
11032 				break;
11033 			}
11034 		}
11035 
11036 		if (!modified)
11037 			continue;
11038 
11039 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11040 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11041 				continue;
11042 
11043 			new_plane_state =
11044 				drm_atomic_get_plane_state(state, plane);
11045 
11046 			if (IS_ERR(new_plane_state)) {
11047 				ret = PTR_ERR(new_plane_state);
11048 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11049 				goto fail;
11050 			}
11051 		}
11052 	}
11053 
11054 	/* Remove existing planes if they are modified */
11055 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11056 		ret = dm_update_plane_state(dc, state, plane,
11057 					    old_plane_state,
11058 					    new_plane_state,
11059 					    false,
11060 					    &lock_and_validation_needed);
11061 		if (ret) {
11062 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11063 			goto fail;
11064 		}
11065 	}
11066 
11067 	/* Disable all crtcs which require disable */
11068 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11069 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11070 					   old_crtc_state,
11071 					   new_crtc_state,
11072 					   false,
11073 					   &lock_and_validation_needed);
11074 		if (ret) {
11075 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11076 			goto fail;
11077 		}
11078 	}
11079 
11080 	/* Enable all crtcs which require enable */
11081 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11082 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11083 					   old_crtc_state,
11084 					   new_crtc_state,
11085 					   true,
11086 					   &lock_and_validation_needed);
11087 		if (ret) {
11088 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11089 			goto fail;
11090 		}
11091 	}
11092 
11093 	/* Add new/modified planes */
11094 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11095 		ret = dm_update_plane_state(dc, state, plane,
11096 					    old_plane_state,
11097 					    new_plane_state,
11098 					    true,
11099 					    &lock_and_validation_needed);
11100 		if (ret) {
11101 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11102 			goto fail;
11103 		}
11104 	}
11105 
11106 	/* Run this here since we want to validate the streams we created */
11107 	ret = drm_atomic_helper_check_planes(dev, state);
11108 	if (ret) {
11109 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11110 		goto fail;
11111 	}
11112 
11113 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11114 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11115 		if (dm_new_crtc_state->mpo_requested)
11116 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11117 	}
11118 
11119 	/* Check cursor planes scaling */
11120 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11121 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11122 		if (ret) {
11123 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11124 			goto fail;
11125 		}
11126 	}
11127 
11128 	if (state->legacy_cursor_update) {
11129 		/*
11130 		 * This is a fast cursor update coming from the plane update
11131 		 * helper, check if it can be done asynchronously for better
11132 		 * performance.
11133 		 */
11134 		state->async_update =
11135 			!drm_atomic_helper_async_check(dev, state);
11136 
11137 		/*
11138 		 * Skip the remaining global validation if this is an async
11139 		 * update. Cursor updates can be done without affecting
11140 		 * state or bandwidth calcs and this avoids the performance
11141 		 * penalty of locking the private state object and
11142 		 * allocating a new dc_state.
11143 		 */
11144 		if (state->async_update)
11145 			return 0;
11146 	}
11147 
11148 	/* Check scaling and underscan changes */
11149 	/* TODO Removed scaling changes validation due to inability to commit
11150 	 * new stream into context w/o causing full reset. Need to
11151 	 * decide how to handle.
11152 	 */
11153 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11154 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11155 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11156 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11157 
11158 		/* Skip any modesets/resets */
11159 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11160 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11161 			continue;
11162 
11163 		/* Skip anything that is not a scaling or underscan change */
11164 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11165 			continue;
11166 
11167 		lock_and_validation_needed = true;
11168 	}
11169 
11170 #if defined(CONFIG_DRM_AMD_DC_DCN)
11171 	/* set the slot info for each mst_state based on the link encoding format */
11172 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11173 		struct amdgpu_dm_connector *aconnector;
11174 		struct drm_connector *connector;
11175 		struct drm_connector_list_iter iter;
11176 		u8 link_coding_cap;
11177 
11178 		if (!mgr->mst_state)
11179 			continue;
11180 
11181 		drm_connector_list_iter_begin(dev, &iter);
11182 		drm_for_each_connector_iter(connector, &iter) {
11183 			int id = connector->index;
11184 
11185 			if (id == mst_state->mgr->conn_base_id) {
11186 				aconnector = to_amdgpu_dm_connector(connector);
11187 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11188 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11189 
11190 				break;
11191 			}
11192 		}
11193 		drm_connector_list_iter_end(&iter);
11194 
11195 	}
11196 #endif
11197 	/**
11198 	 * Streams and planes are reset when there are changes that affect
11199 	 * bandwidth. Anything that affects bandwidth needs to go through
11200 	 * DC global validation to ensure that the configuration can be applied
11201 	 * to hardware.
11202 	 *
11203 	 * We have to currently stall out here in atomic_check for outstanding
11204 	 * commits to finish in this case because our IRQ handlers reference
11205 	 * DRM state directly - we can end up disabling interrupts too early
11206 	 * if we don't.
11207 	 *
11208 	 * TODO: Remove this stall and drop DM state private objects.
11209 	 */
11210 	if (lock_and_validation_needed) {
11211 		ret = dm_atomic_get_state(state, &dm_state);
11212 		if (ret) {
11213 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11214 			goto fail;
11215 		}
11216 
11217 		ret = do_aquire_global_lock(dev, state);
11218 		if (ret) {
11219 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11220 			goto fail;
11221 		}
11222 
11223 #if defined(CONFIG_DRM_AMD_DC_DCN)
11224 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11225 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11226 			goto fail;
11227 		}
11228 
11229 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11230 		if (ret) {
11231 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11232 			goto fail;
11233 		}
11234 #endif
11235 
11236 		/*
11237 		 * Perform validation of MST topology in the state:
11238 		 * We need to perform MST atomic check before calling
11239 		 * dc_validate_global_state(), or there is a chance
11240 		 * to get stuck in an infinite loop and hang eventually.
11241 		 */
11242 		ret = drm_dp_mst_atomic_check(state);
11243 		if (ret) {
11244 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11245 			goto fail;
11246 		}
11247 		status = dc_validate_global_state(dc, dm_state->context, true);
11248 		if (status != DC_OK) {
11249 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11250 				       dc_status_to_str(status), status);
11251 			ret = -EINVAL;
11252 			goto fail;
11253 		}
11254 	} else {
11255 		/*
11256 		 * The commit is a fast update. Fast updates shouldn't change
11257 		 * the DC context, affect global validation, and can have their
11258 		 * commit work done in parallel with other commits not touching
11259 		 * the same resource. If we have a new DC context as part of
11260 		 * the DM atomic state from validation we need to free it and
11261 		 * retain the existing one instead.
11262 		 *
11263 		 * Furthermore, since the DM atomic state only contains the DC
11264 		 * context and can safely be annulled, we can free the state
11265 		 * and clear the associated private object now to free
11266 		 * some memory and avoid a possible use-after-free later.
11267 		 */
11268 
11269 		for (i = 0; i < state->num_private_objs; i++) {
11270 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11271 
11272 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11273 				int j = state->num_private_objs-1;
11274 
11275 				dm_atomic_destroy_state(obj,
11276 						state->private_objs[i].state);
11277 
11278 				/* If i is not at the end of the array then the
11279 				 * last element needs to be moved to where i was
11280 				 * before the array can safely be truncated.
11281 				 */
11282 				if (i != j)
11283 					state->private_objs[i] =
11284 						state->private_objs[j];
11285 
11286 				state->private_objs[j].ptr = NULL;
11287 				state->private_objs[j].state = NULL;
11288 				state->private_objs[j].old_state = NULL;
11289 				state->private_objs[j].new_state = NULL;
11290 
11291 				state->num_private_objs = j;
11292 				break;
11293 			}
11294 		}
11295 	}
11296 
11297 	/* Store the overall update type for use later in atomic check. */
11298 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11299 		struct dm_crtc_state *dm_new_crtc_state =
11300 			to_dm_crtc_state(new_crtc_state);
11301 
11302 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11303 							 UPDATE_TYPE_FULL :
11304 							 UPDATE_TYPE_FAST;
11305 	}
11306 
11307 	/* Must be success */
11308 	WARN_ON(ret);
11309 
11310 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11311 
11312 	return ret;
11313 
11314 fail:
11315 	if (ret == -EDEADLK)
11316 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11317 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11318 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11319 	else
11320 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11321 
11322 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11323 
11324 	return ret;
11325 }
11326 
11327 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11328 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11329 {
11330 	uint8_t dpcd_data;
11331 	bool capable = false;
11332 
11333 	if (amdgpu_dm_connector->dc_link &&
11334 		dm_helpers_dp_read_dpcd(
11335 				NULL,
11336 				amdgpu_dm_connector->dc_link,
11337 				DP_DOWN_STREAM_PORT_COUNT,
11338 				&dpcd_data,
11339 				sizeof(dpcd_data))) {
11340 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11341 	}
11342 
11343 	return capable;
11344 }
11345 
11346 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11347 		unsigned int offset,
11348 		unsigned int total_length,
11349 		uint8_t *data,
11350 		unsigned int length,
11351 		struct amdgpu_hdmi_vsdb_info *vsdb)
11352 {
11353 	bool res;
11354 	union dmub_rb_cmd cmd;
11355 	struct dmub_cmd_send_edid_cea *input;
11356 	struct dmub_cmd_edid_cea_output *output;
11357 
11358 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11359 		return false;
11360 
11361 	memset(&cmd, 0, sizeof(cmd));
11362 
11363 	input = &cmd.edid_cea.data.input;
11364 
11365 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11366 	cmd.edid_cea.header.sub_type = 0;
11367 	cmd.edid_cea.header.payload_bytes =
11368 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11369 	input->offset = offset;
11370 	input->length = length;
11371 	input->cea_total_length = total_length;
11372 	memcpy(input->payload, data, length);
11373 
11374 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11375 	if (!res) {
11376 		DRM_ERROR("EDID CEA parser failed\n");
11377 		return false;
11378 	}
11379 
11380 	output = &cmd.edid_cea.data.output;
11381 
11382 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11383 		if (!output->ack.success) {
11384 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11385 					output->ack.offset);
11386 		}
11387 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11388 		if (!output->amd_vsdb.vsdb_found)
11389 			return false;
11390 
11391 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11392 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11393 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11394 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11395 	} else {
11396 		DRM_WARN("Unknown EDID CEA parser results\n");
11397 		return false;
11398 	}
11399 
11400 	return true;
11401 }
11402 
11403 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11404 		uint8_t *edid_ext, int len,
11405 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11406 {
11407 	int i;
11408 
11409 	/* send extension block to DMCU for parsing */
11410 	for (i = 0; i < len; i += 8) {
11411 		bool res;
11412 		int offset;
11413 
11414 		/* send 8 bytes at a time */
11415 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11416 			return false;
11417 
11418 		if (i+8 == len) {
11419 			/* EDID block fully sent, expect the parsed result */
11420 			int version, min_rate, max_rate;
11421 
11422 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11423 			if (res) {
11424 				/* amd vsdb found */
11425 				vsdb_info->freesync_supported = 1;
11426 				vsdb_info->amd_vsdb_version = version;
11427 				vsdb_info->min_refresh_rate_hz = min_rate;
11428 				vsdb_info->max_refresh_rate_hz = max_rate;
11429 				return true;
11430 			}
11431 			/* not amd vsdb */
11432 			return false;
11433 		}
11434 
11435 		/* check for ack */
11436 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11437 		if (!res)
11438 			return false;
11439 	}
11440 
11441 	return false;
11442 }
11443 
11444 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11445 		uint8_t *edid_ext, int len,
11446 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11447 {
11448 	int i;
11449 
11450 	/* send extension block to DMUB for parsing */
11451 	for (i = 0; i < len; i += 8) {
11452 		/* send 8 bytes at a time */
11453 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11454 			return false;
11455 	}
11456 
11457 	return vsdb_info->freesync_supported;
11458 }
11459 
11460 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11461 		uint8_t *edid_ext, int len,
11462 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11463 {
11464 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11465 
11466 	if (adev->dm.dmub_srv)
11467 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11468 	else
11469 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11470 }
11471 
11472 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11473 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11474 {
11475 	uint8_t *edid_ext = NULL;
11476 	int i;
11477 	bool valid_vsdb_found = false;
11478 
11479 	/*----- drm_find_cea_extension() -----*/
11480 	/* No EDID or EDID extensions */
11481 	if (edid == NULL || edid->extensions == 0)
11482 		return -ENODEV;
11483 
11484 	/* Find CEA extension */
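	/* Each extension block is EDID_LENGTH (128) bytes, following the base block */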
11485 	for (i = 0; i < edid->extensions; i++) {
11486 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11487 		if (edid_ext[0] == CEA_EXT)
11488 			break;
11489 	}
11490 
11491 	if (i == edid->extensions)
11492 		return -ENODEV;
11493 
11494 	/*----- cea_db_offsets() -----*/
11495 	if (edid_ext[0] != CEA_EXT)
11496 		return -ENODEV;
11497 
11498 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11499 
11500 	return valid_vsdb_found ? i : -ENODEV;
11501 }
11502 
11503 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11504 					struct edid *edid)
11505 {
11506 	int i = 0;
11507 	struct detailed_timing *timing;
11508 	struct detailed_non_pixel *data;
11509 	struct detailed_data_monitor_range *range;
11510 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11511 			to_amdgpu_dm_connector(connector);
11512 	struct dm_connector_state *dm_con_state = NULL;
11513 	struct dc_sink *sink;
11514 
11515 	struct drm_device *dev = connector->dev;
11516 	struct amdgpu_device *adev = drm_to_adev(dev);
11517 	bool freesync_capable = false;
11518 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11519 
11520 	if (!connector->state) {
11521 		DRM_ERROR("%s - Connector has no state", __func__);
11522 		goto update;
11523 	}
11524 
11525 	sink = amdgpu_dm_connector->dc_sink ?
11526 		amdgpu_dm_connector->dc_sink :
11527 		amdgpu_dm_connector->dc_em_sink;
11528 
11529 	if (!edid || !sink) {
11530 		dm_con_state = to_dm_connector_state(connector->state);
11531 
11532 		amdgpu_dm_connector->min_vfreq = 0;
11533 		amdgpu_dm_connector->max_vfreq = 0;
11534 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11535 		connector->display_info.monitor_range.min_vfreq = 0;
11536 		connector->display_info.monitor_range.max_vfreq = 0;
11537 		freesync_capable = false;
11538 
11539 		goto update;
11540 	}
11541 
11542 	dm_con_state = to_dm_connector_state(connector->state);
11543 
11544 	if (!adev->dm.freesync_module)
11545 		goto update;
11546 
11547 
11548 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11549 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11550 		bool edid_check_required = false;
11551 
11552 		if (edid) {
11553 			edid_check_required = is_dp_capable_without_timing_msa(
11554 						adev->dm.dc,
11555 						amdgpu_dm_connector);
11556 		}
11557 
11558 		if (edid_check_required && (edid->version > 1 ||
11559 		   (edid->version == 1 && edid->revision > 1))) {
11560 			for (i = 0; i < 4; i++) {
11561 
11562 				timing	= &edid->detailed_timings[i];
11563 				data	= &timing->data.other_data;
11564 				range	= &data->data.range;
11565 				/*
11566 				 * Check if monitor has continuous frequency mode
11567 				 */
11568 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11569 					continue;
11570 				/*
11571 				 * Only accept range limit descriptors with flags == 1,
11572 				 * i.e. no additional timing information provided.
11573 				 * Default GTF, secondary GTF and CVT range types are
11574 				 * not supported.
11575 				 */
11576 				if (range->flags != 1)
11577 					continue;
11578 
11579 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11580 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11581 				amdgpu_dm_connector->pixel_clock_mhz =
11582 					range->pixel_clock_mhz * 10;
11583 
11584 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11585 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11586 
11587 				break;
11588 			}
11589 
11590 			if (amdgpu_dm_connector->max_vfreq -
11591 			    amdgpu_dm_connector->min_vfreq > 10) {
11592 
11593 				freesync_capable = true;
11594 			}
11595 		}
11596 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11597 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11598 		if (i >= 0 && vsdb_info.freesync_supported) {
11599 			timing  = &edid->detailed_timings[i];
11600 			data    = &timing->data.other_data;
11601 
11602 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11603 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11604 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11605 				freesync_capable = true;
11606 
11607 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11608 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11609 		}
11610 	}
11611 
11612 update:
11613 	if (dm_con_state)
11614 		dm_con_state->freesync_capable = freesync_capable;
11615 
11616 	if (connector->vrr_capable_property)
11617 		drm_connector_set_vrr_capable_property(connector,
11618 						       freesync_capable);
11619 }
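
/*
 * Illustrative note: the "> 10" checks above mean a sink must advertise a
 * usable VRR window of more than 10 Hz to be reported as FreeSync capable.
 * For example, a 48-144 Hz range (144 - 48 = 96 > 10) enables VRR, while a
 * fixed 60-60 Hz or a narrow 56-60 Hz range does not.
 */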
11620 
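/**
 * amdgpu_dm_trigger_timing_sync - re-apply the forced CRTC timing sync setting
 * @dev: DRM device
 *
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and retrigger per-frame CRTC master synchronization under the DC
 * lock.
 */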
11621 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11622 {
11623 	struct amdgpu_device *adev = drm_to_adev(dev);
11624 	struct dc *dc = adev->dm.dc;
11625 	int i;
11626 
11627 	mutex_lock(&adev->dm.dc_lock);
11628 	if (dc->current_state) {
11629 		for (i = 0; i < dc->current_state->stream_count; ++i)
11630 			dc->current_state->streams[i]
11631 				->triggered_crtc_reset.enabled =
11632 				adev->dm.force_timing_sync;
11633 
11634 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11635 		dc_trigger_sync(dc, dc->current_state);
11636 	}
11637 	mutex_unlock(&adev->dm.dc_lock);
11638 }
11639 
11640 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11641 		       uint32_t value, const char *func_name)
11642 {
11643 #ifdef DM_CHECK_ADDR_0
11644 	if (address == 0) {
11645 		DC_ERR("invalid register write. address = 0\n");
11646 		return;
11647 	}
11648 #endif
11649 	cgs_write_register(ctx->cgs_device, address, value);
11650 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11651 }
11652 
11653 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11654 			  const char *func_name)
11655 {
11656 	uint32_t value;
11657 #ifdef DM_CHECK_ADDR_0
11658 	if (address == 0) {
11659 		DC_ERR("invalid register read; address = 0\n");
11660 		return 0;
11661 	}
11662 #endif
11663 
11664 	if (ctx->dmub_srv &&
11665 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11666 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11667 		ASSERT(false);
11668 		return 0;
11669 	}
11670 
11671 	value = cgs_read_register(ctx->cgs_device, address);
11672 
11673 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11674 
11675 	return value;
11676 }
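
/*
 * Illustrative sketch (hypothetical use, assuming a valid struct dc_context
 * *ctx and a register offset reg_addr): the two helpers above are DM's
 * backend for DC register I/O, e.g. a read-modify-write would be
 *
 *	uint32_t val = dm_read_reg_func(ctx, reg_addr, __func__);
 *
 *	dm_write_reg_func(ctx, reg_addr, val | 0x1, __func__);
 *
 * Both helpers emit the amdgpu_dc_rreg/amdgpu_dc_wreg trace events, and the
 * read path deliberately bails out while a DMUB register gather is in
 * progress, since direct reads are not expected during the offloaded
 * sequence.
 */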
11677 
11678 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11679 						struct dc_context *ctx,
11680 						uint8_t status_type,
11681 						uint32_t *operation_result)
11682 {
11683 	struct amdgpu_device *adev = ctx->driver_context;
11684 	int return_status = -1;
11685 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11686 
11687 	if (is_cmd_aux) {
11688 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11689 			return_status = p_notify->aux_reply.length;
11690 			*operation_result = p_notify->result;
11691 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11692 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11693 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11694 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11695 		} else {
11696 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11697 		}
11698 	} else {
11699 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11700 			return_status = 0;
11701 			*operation_result = p_notify->sc_status;
11702 		} else {
11703 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11704 		}
11705 	}
11706 
11707 	return return_status;
11708 }
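
/*
 * Summary of the contract encoded above, as used by the synchronous wrapper
 * below (is_cmd_aux selects AUX transfers, otherwise SET_CONFIG):
 *
 *	path        status      return value    *operation_result
 *	AUX         SUCCESS     reply length    dmub_notify->result
 *	AUX         TIMEOUT     -1              AUX_RET_ERROR_TIMEOUT
 *	AUX         FAIL        -1              AUX_RET_ERROR_ENGINE_ACQUIRE
 *	AUX         other       -1              AUX_RET_ERROR_UNKNOWN
 *	SET_CONFIG  SUCCESS     0               dmub_notify->sc_status
 *	SET_CONFIG  other       -1              SET_CONFIG_UNKNOWN_ERROR
 */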
11709 
11710 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11711 	unsigned int link_index, void *cmd_payload, void *operation_result)
11712 {
11713 	struct amdgpu_device *adev = ctx->driver_context;
11714 	int ret = 0;
11715 
11716 	if (is_cmd_aux) {
11717 		dc_process_dmub_aux_transfer_async(ctx->dc,
11718 			link_index, (struct aux_payload *)cmd_payload);
11719 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11720 					(struct set_config_cmd_payload *)cmd_payload,
11721 					adev->dm.dmub_notify)) {
11722 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11723 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11724 					(uint32_t *)operation_result);
11725 	}
11726 
11727 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11728 	if (ret == 0) {
11729 		DRM_ERROR("wait_for_completion_timeout timed out!\n");
11730 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11731 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11732 				(uint32_t *)operation_result);
11733 	}
11734 
11735 	if (is_cmd_aux) {
11736 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11737 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11738 
11739 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11740 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11741 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11742 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11743 				       adev->dm.dmub_notify->aux_reply.length);
11744 			}
11745 		}
11746 	}
11747 
11748 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11749 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11750 			(uint32_t *)operation_result);
11751 }
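
/*
 * Illustrative sketch (hypothetical caller, assuming a valid ctx and
 * link_index and the usual struct aux_payload fields): a synchronous
 * one-byte DPCD read over this DMUB-backed path fires the async request and
 * then blocks on dmub_aux_transfer_done for up to 10 seconds (10 * HZ):
 *
 *	uint8_t data, reply;
 *	uint32_t result;
 *	// DP_SET_POWER (0x600) is used purely as an example DPCD address
 *	struct aux_payload payload = {
 *		.address = 0x600,
 *		.length  = 1,
 *		.data    = &data,
 *		.reply   = &reply,
 *		.write   = false,
 *	};
 *	int len = amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
 *				link_index, &payload, &result);
 *
 * A negative return or result != AUX_RET_SUCCESS means the transfer did not
 * complete; on success the return value is the AUX reply length.
 */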
11752 
11753 /*
11754  * Check whether seamless boot is supported.
11755  *
11756  * So far we only support seamless boot on CHIP_VANGOGH.
11757  * If everything goes well, we may consider expanding
11758  * seamless boot to other ASICs.
11759  */
11760 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11761 {
11762 	switch (adev->asic_type) {
11763 	case CHIP_VANGOGH:
11764 		if (!adev->mman.keep_stolen_vga_memory)
11765 			return true;
11766 		break;
11767 	default:
11768 		break;
11769 	}
11770 
11771 	return false;
11772 }
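
/*
 * Illustrative note (hypothetical call site, flag name assumed): this is
 * typically consumed as a simple boot-time gate, e.g.
 *
 *	if (check_seamless_boot_capability(adev))
 *		init_data.flags.allow_seamless_boot_optimization = true;
 *
 * i.e. seamless boot is only attempted on Van Gogh, and only when the stolen
 * VGA memory does not have to be preserved.
 */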
11773