xref: /linux/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c (revision f82811e22b480a203a438d8e1f29af9c93ccbb0c)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/pci.h>
25 
26 #include <drm/drm_edid.h>
27 #include <drm/drm_fourcc.h>
28 #include <drm/drm_modeset_helper.h>
29 #include <drm/drm_modeset_helper_vtables.h>
30 #include <drm/drm_vblank.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_pm.h"
34 #include "amdgpu_i2c.h"
35 #include "atom.h"
36 #include "amdgpu_atombios.h"
37 #include "atombios_crtc.h"
38 #include "atombios_encoders.h"
39 #include "amdgpu_pll.h"
40 #include "amdgpu_connectors.h"
41 #include "amdgpu_display.h"
42 
43 #include "bif/bif_3_0_d.h"
44 #include "bif/bif_3_0_sh_mask.h"
45 #include "oss/oss_1_0_d.h"
46 #include "oss/oss_1_0_sh_mask.h"
47 #include "gca/gfx_6_0_d.h"
48 #include "gca/gfx_6_0_sh_mask.h"
49 #include "gmc/gmc_6_0_d.h"
50 #include "gmc/gmc_6_0_sh_mask.h"
51 #include "dce/dce_6_0_d.h"
52 #include "dce/dce_6_0_sh_mask.h"
53 #include "gca/gfx_7_2_enum.h"
54 #include "dce_v6_0.h"
55 #include "si_enums.h"
56 
57 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
58 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
59 
60 static const u32 crtc_offsets[6] =
61 {
62 	SI_CRTC0_REGISTER_OFFSET,
63 	SI_CRTC1_REGISTER_OFFSET,
64 	SI_CRTC2_REGISTER_OFFSET,
65 	SI_CRTC3_REGISTER_OFFSET,
66 	SI_CRTC4_REGISTER_OFFSET,
67 	SI_CRTC5_REGISTER_OFFSET
68 };
69 
70 static const u32 hpd_offsets[] =
71 {
72 	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
73 	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
74 	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
75 	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
76 	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
77 	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
78 };
79 
80 static const uint32_t dig_offsets[] = {
81 	SI_CRTC0_REGISTER_OFFSET,
82 	SI_CRTC1_REGISTER_OFFSET,
83 	SI_CRTC2_REGISTER_OFFSET,
84 	SI_CRTC3_REGISTER_OFFSET,
85 	SI_CRTC4_REGISTER_OFFSET,
86 	SI_CRTC5_REGISTER_OFFSET,
87 	(0x13830 - 0x7030) >> 2,
88 };
89 
90 static const struct {
91 	uint32_t	reg;
92 	uint32_t	vblank;
93 	uint32_t	vline;
94 	uint32_t	hpd;
95 
96 } interrupt_status_offsets[6] = { {
97 	.reg = mmDISP_INTERRUPT_STATUS,
98 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
99 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
100 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
101 }, {
102 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
103 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
104 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
105 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
106 }, {
107 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
108 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
109 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
110 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
111 }, {
112 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
113 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
114 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
115 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
116 }, {
117 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
118 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
119 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
120 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
121 }, {
122 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
123 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
124 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
125 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
126 } };
127 
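/**
 * dce_v6_0_audio_endpt_rreg - read an audio endpoint register
 *
 * @adev: amdgpu_device pointer
 * @block_offset: offset of the audio endpoint register block
 * @reg: indirect endpoint register to read
 *
 * Reads an AZALIA endpoint register through the index/data pair,
 * serialized by the audio endpoint index lock.
 * Returns the register value.
 */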
128 static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
129 				     u32 block_offset, u32 reg)
130 {
131 	unsigned long flags;
132 	u32 r;
133 
134 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
135 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
136 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
137 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
138 
139 	return r;
140 }
141 
142 static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
143 				      u32 block_offset, u32 reg, u32 v)
144 {
145 	unsigned long flags;
146 
147 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
148 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
149 		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
150 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
151 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
152 }
153 
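/**
 * dce_v6_0_vblank_get_counter - get the vblank frame counter
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to read the frame counter from
 *
 * Returns the current frame count from the CRTC status register,
 * or 0 if the crtc index is out of range.
 */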
154 static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
155 {
156 	if (crtc >= adev->mode_info.num_crtc)
157 		return 0;
158 	else
159 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
160 }
161 
162 static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
163 {
164 	unsigned i;
165 
166 	/* Enable pflip interrupts */
167 	for (i = 0; i < adev->mode_info.num_crtc; i++)
168 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
169 }
170 
171 static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
172 {
173 	unsigned i;
174 
175 	/* Disable pflip interrupts */
176 	for (i = 0; i < adev->mode_info.num_crtc; i++)
177 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
178 }
179 
180 /**
181  * dce_v6_0_page_flip - pageflip callback.
182  *
183  * @adev: amdgpu_device pointer
184  * @crtc_id: crtc to flip on
185  * @crtc_base: new address of the crtc (GPU MC address)
186  * @async: asynchronous flip
187  *
188  * Does the actual pageflip (evergreen+).
189  * Programs the new primary surface address and pitch for the crtc.
190  * The update is double buffered: the hardware latches the new
191  * address at the next vblank, or at the next hsync when @async
192  * is set.
193  */
194 static void dce_v6_0_page_flip(struct amdgpu_device *adev,
195 			       int crtc_id, u64 crtc_base, bool async)
196 {
197 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
198 	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
199 
200 	/* flip at hsync for async, default is vsync */
201 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
202 	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
203 	/* update pitch */
204 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
205 	       fb->pitches[0] / fb->format->cpp[0]);
206 	/* update the scanout addresses */
207 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
208 	       upper_32_bits(crtc_base));
209 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
210 	       (u32)crtc_base);
211 
212 	/* post the write */
213 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
214 }
215 
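/**
 * dce_v6_0_crtc_get_scanoutpos - get the raw scanout position
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to query
 * @vbl: returns the raw vblank start/end register value
 * @position: returns the raw scanout position register value
 *
 * Reads the vblank start/end and current scanout position registers
 * for the crtc.
 * Returns 0 on success, -EINVAL if the crtc index is out of range.
 */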
216 static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
217 					u32 *vbl, u32 *position)
218 {
219 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
220 		return -EINVAL;
221 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
222 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
223 
224 	return 0;
225 
226 }
227 
228 /**
229  * dce_v6_0_hpd_sense - hpd sense callback.
230  *
231  * @adev: amdgpu_device pointer
232  * @hpd: hpd (hotplug detect) pin
233  *
234  * Checks if a digital monitor is connected (evergreen+).
235  * Returns true if connected, false if not connected.
236  */
237 static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
238 			       enum amdgpu_hpd_id hpd)
239 {
240 	bool connected = false;
241 
242 	if (hpd >= adev->mode_info.num_hpd)
243 		return connected;
244 
245 	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
246 		connected = true;
247 
248 	return connected;
249 }
250 
251 /**
252  * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
253  *
254  * @adev: amdgpu_device pointer
255  * @hpd: hpd (hotplug detect) pin
256  *
257  * Set the polarity of the hpd pin (evergreen+).
258  */
259 static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
260 				      enum amdgpu_hpd_id hpd)
261 {
262 	u32 tmp;
263 	bool connected = dce_v6_0_hpd_sense(adev, hpd);
264 
265 	if (hpd >= adev->mode_info.num_hpd)
266 		return;
267 
268 	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
269 	if (connected)
270 		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
271 	else
272 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
273 	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
274 }
275 
276 /**
277  * dce_v6_0_hpd_init - hpd setup callback.
278  *
279  * @adev: amdgpu_device pointer
280  *
281  * Setup the hpd pins used by the card (evergreen+).
282  * Enable the pin, set the polarity, and enable the hpd interrupts.
283  */
284 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
285 {
286 	struct drm_device *dev = adev_to_drm(adev);
287 	struct drm_connector *connector;
288 	struct drm_connector_list_iter iter;
289 	u32 tmp;
290 
291 	drm_connector_list_iter_begin(dev, &iter);
292 	drm_for_each_connector_iter(connector, &iter) {
293 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
294 
295 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
296 			continue;
297 
298 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
299 		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
300 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
301 
302 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
303 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
304 			/* Don't enable hpd on eDP or LVDS: this avoids breaking the
305 			 * aux dp channel on imac and helps (but does not completely fix)
306 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
307 			 * It also avoids interrupt storms during dpms.
308 			 */
309 			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
310 			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
311 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
312 			continue;
313 		}
314 
315 		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
316 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
317 	}
318 	drm_connector_list_iter_end(&iter);
319 }
320 
321 /**
322  * dce_v6_0_hpd_fini - hpd tear down callback.
323  *
324  * @adev: amdgpu_device pointer
325  *
326  * Tear down the hpd pins used by the card (evergreen+).
327  * Disable the hpd interrupts.
328  */
329 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
330 {
331 	struct drm_device *dev = adev_to_drm(adev);
332 	struct drm_connector *connector;
333 	struct drm_connector_list_iter iter;
334 	u32 tmp;
335 
336 	drm_connector_list_iter_begin(dev, &iter);
337 	drm_for_each_connector_iter(connector, &iter) {
338 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
339 
340 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
341 			continue;
342 
343 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
344 		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
345 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
346 
347 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
348 	}
349 	drm_connector_list_iter_end(&iter);
350 }
351 
352 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
353 {
354 	return mmDC_GPIO_HPD_A;
355 }
356 
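/**
 * dce_v6_0_set_vga_render_state - disable VGA rendering
 *
 * @adev: amdgpu_device pointer
 * @render: enable/disable VGA rendering
 *
 * Clears the VGA render control bits when @render is false so the
 * VGA engine no longer drives the display; the enable case is a no-op.
 */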
357 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
358 					  bool render)
359 {
360 	if (!render)
361 		WREG32(mmVGA_RENDER_CONTROL,
362 			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
363 
364 }
365 
366 static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
367 {
368 	switch (adev->asic_type) {
369 	case CHIP_TAHITI:
370 	case CHIP_PITCAIRN:
371 	case CHIP_VERDE:
372 		return 6;
373 	case CHIP_OLAND:
374 		return 2;
375 	default:
376 		return 0;
377 	}
378 }
379 
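/**
 * dce_v6_0_disable_dce - disable VGA rendering and the CRTCs
 *
 * @adev: amdgpu_device pointer
 *
 * If the board carries a DCE engine, disable VGA rendering and turn
 * off any CRTC that is currently enabled.
 */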
380 void dce_v6_0_disable_dce(struct amdgpu_device *adev)
381 {
382 	/* Disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */
383 	if (amdgpu_atombios_has_dce_engine_info(adev)) {
384 		u32 tmp;
385 		int crtc_enabled, i;
386 
387 		dce_v6_0_set_vga_render_state(adev, false);
388 
389 		/* Disable the CRTCs */
390 		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
391 			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
392 				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
393 			if (crtc_enabled) {
394 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
395 				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
396 				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
397 				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
398 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
399 			}
400 		}
401 	}
402 }
403 
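/**
 * dce_v6_0_program_fmt - set up dithering/truncation for the encoder
 *
 * @encoder: encoder to configure
 *
 * Programs the FMT block of the crtc driving @encoder for dithering
 * or truncation based on the monitor bpc and the connector's dither
 * setting.  LCD outputs are set up by the atom tables instead.
 */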
404 static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
405 {
406 
407 	struct drm_device *dev = encoder->dev;
408 	struct amdgpu_device *adev = drm_to_adev(dev);
409 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
410 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
411 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
412 	int bpc = 0;
413 	u32 tmp = 0;
414 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
415 
416 	if (connector) {
417 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
418 		bpc = amdgpu_connector_get_monitor_bpc(connector);
419 		dither = amdgpu_connector->dither;
420 	}
421 
422 	/* LVDS FMT is set up by atom */
423 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
424 		return;
425 
426 	if (bpc == 0)
427 		return;
428 
429 
430 	switch (bpc) {
431 	case 6:
432 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
433 			/* XXX sort out optimal dither settings */
434 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
435 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
436 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
437 		else
438 			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
439 		break;
440 	case 8:
441 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
442 			/* XXX sort out optimal dither settings */
443 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
444 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
445 				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
446 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
447 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
448 		else
449 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
450 				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
451 		break;
452 	case 10:
453 	default:
454 		/* not needed */
455 		break;
456 	}
457 
458 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
459 }
460 
461 /**
462  * si_get_number_of_dram_channels - get the number of dram channels
463  *
464  * @adev: amdgpu_device pointer
465  *
466  * Look up the number of video ram channels (CIK).
467  * Used for display watermark bandwidth calculations
468  * Returns the number of dram channels
469  */
470 static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
471 {
472 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
473 
474 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
475 	case 0:
476 	default:
477 		return 1;
478 	case 1:
479 		return 2;
480 	case 2:
481 		return 4;
482 	case 3:
483 		return 8;
484 	case 4:
485 		return 3;
486 	case 5:
487 		return 6;
488 	case 6:
489 		return 10;
490 	case 7:
491 		return 12;
492 	case 8:
493 		return 16;
494 	}
495 }
496 
497 struct dce6_wm_params {
498 	u32 dram_channels; /* number of dram channels */
499 	u32 yclk;          /* bandwidth per dram data pin in kHz */
500 	u32 sclk;          /* engine clock in kHz */
501 	u32 disp_clk;      /* display clock in kHz */
502 	u32 src_width;     /* viewport width */
503 	u32 active_time;   /* active display time in ns */
504 	u32 blank_time;    /* blank time in ns */
505 	bool interlaced;    /* mode is interlaced */
506 	fixed20_12 vsc;    /* vertical scale ratio */
507 	u32 num_heads;     /* number of active crtcs */
508 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
509 	u32 lb_size;       /* line buffer allocated to pipe */
510 	u32 vtaps;         /* vertical scaler taps */
511 };
512 
513 /**
514  * dce_v6_0_dram_bandwidth - get the dram bandwidth
515  *
516  * @wm: watermark calculation data
517  *
518  * Calculate the raw dram bandwidth (CIK).
519  * Used for display watermark bandwidth calculations
520  * Returns the dram bandwidth in MBytes/s
521  */
522 static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
523 {
524 	/* Calculate raw DRAM Bandwidth */
525 	fixed20_12 dram_efficiency; /* 0.7 */
526 	fixed20_12 yclk, dram_channels, bandwidth;
527 	fixed20_12 a;
528 
529 	a.full = dfixed_const(1000);
530 	yclk.full = dfixed_const(wm->yclk);
531 	yclk.full = dfixed_div(yclk, a);
532 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
533 	a.full = dfixed_const(10);
534 	dram_efficiency.full = dfixed_const(7);
535 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
536 	bandwidth.full = dfixed_mul(dram_channels, yclk);
537 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
538 
539 	return dfixed_trunc(bandwidth);
540 }
541 
542 /**
543  * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
544  *
545  * @wm: watermark calculation data
546  *
547  * Calculate the dram bandwidth used for display (CIK).
548  * Used for display watermark bandwidth calculations
549  * Returns the dram bandwidth for display in MBytes/s
550  */
551 static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
552 {
553 	/* Calculate DRAM Bandwidth and the part allocated to display. */
554 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
555 	fixed20_12 yclk, dram_channels, bandwidth;
556 	fixed20_12 a;
557 
558 	a.full = dfixed_const(1000);
559 	yclk.full = dfixed_const(wm->yclk);
560 	yclk.full = dfixed_div(yclk, a);
561 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
562 	a.full = dfixed_const(10);
563 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
564 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
565 	bandwidth.full = dfixed_mul(dram_channels, yclk);
566 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
567 
568 	return dfixed_trunc(bandwidth);
569 }
570 
571 /**
572  * dce_v6_0_data_return_bandwidth - get the data return bandwidth
573  *
574  * @wm: watermark calculation data
575  *
576  * Calculate the data return bandwidth used for display (CIK).
577  * Used for display watermark bandwidth calculations
578  * Returns the data return bandwidth in MBytes/s
579  */
580 static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
581 {
582 	/* Calculate the display Data return Bandwidth */
583 	fixed20_12 return_efficiency; /* 0.8 */
584 	fixed20_12 sclk, bandwidth;
585 	fixed20_12 a;
586 
587 	a.full = dfixed_const(1000);
588 	sclk.full = dfixed_const(wm->sclk);
589 	sclk.full = dfixed_div(sclk, a);
590 	a.full = dfixed_const(10);
591 	return_efficiency.full = dfixed_const(8);
592 	return_efficiency.full = dfixed_div(return_efficiency, a);
593 	a.full = dfixed_const(32);
594 	bandwidth.full = dfixed_mul(a, sclk);
595 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
596 
597 	return dfixed_trunc(bandwidth);
598 }
599 
600 /**
601  * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
602  *
603  * @wm: watermark calculation data
604  *
605  * Calculate the dmif bandwidth used for display (CIK).
606  * Used for display watermark bandwidth calculations
607  * Returns the dmif bandwidth in MBytes/s
608  */
609 static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
610 {
611 	/* Calculate the DMIF Request Bandwidth */
612 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
613 	fixed20_12 disp_clk, bandwidth;
614 	fixed20_12 a, b;
615 
616 	a.full = dfixed_const(1000);
617 	disp_clk.full = dfixed_const(wm->disp_clk);
618 	disp_clk.full = dfixed_div(disp_clk, a);
619 	a.full = dfixed_const(32);
620 	b.full = dfixed_mul(a, disp_clk);
621 
622 	a.full = dfixed_const(10);
623 	disp_clk_request_efficiency.full = dfixed_const(8);
624 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
625 
626 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
627 
628 	return dfixed_trunc(bandwidth);
629 }
630 
631 /**
632  * dce_v6_0_available_bandwidth - get the min available bandwidth
633  *
634  * @wm: watermark calculation data
635  *
636  * Calculate the min available bandwidth used for display (CIK).
637  * Used for display watermark bandwidth calculations
638  * Returns the min available bandwidth in MBytes/s
639  */
640 static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
641 {
642 	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
643 	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
644 	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
645 	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
646 
647 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
648 }
649 
650 /**
651  * dce_v6_0_average_bandwidth - get the average available bandwidth
652  *
653  * @wm: watermark calculation data
654  *
655  * Calculate the average available bandwidth used for display (CIK).
656  * Used for display watermark bandwidth calculations
657  * Returns the average available bandwidth in MBytes/s
658  */
659 static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
660 {
661 	/* Calculate the display mode Average Bandwidth
662 	 * DisplayMode should contain the source and destination dimensions,
663 	 * timing, etc.
664 	 */
665 	fixed20_12 bpp;
666 	fixed20_12 line_time;
667 	fixed20_12 src_width;
668 	fixed20_12 bandwidth;
669 	fixed20_12 a;
670 
671 	a.full = dfixed_const(1000);
672 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
673 	line_time.full = dfixed_div(line_time, a);
674 	bpp.full = dfixed_const(wm->bytes_per_pixel);
675 	src_width.full = dfixed_const(wm->src_width);
676 	bandwidth.full = dfixed_mul(src_width, bpp);
677 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
678 	bandwidth.full = dfixed_div(bandwidth, line_time);
679 
680 	return dfixed_trunc(bandwidth);
681 }
682 
683 /**
684  * dce_v6_0_latency_watermark - get the latency watermark
685  *
686  * @wm: watermark calculation data
687  *
688  * Calculate the latency watermark (CIK).
689  * Used for display watermark bandwidth calculations
690  * Returns the latency watermark in ns
691  */
692 static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
693 {
694 	/* First calculate the latency in ns */
695 	u32 mc_latency = 2000; /* 2000 ns. */
696 	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
697 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
698 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
699 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
700 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
701 		(wm->num_heads * cursor_line_pair_return_time);
702 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
703 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
704 	u32 tmp, dmif_size = 12288;
705 	fixed20_12 a, b, c;
706 
707 	if (wm->num_heads == 0)
708 		return 0;
709 
710 	a.full = dfixed_const(2);
711 	b.full = dfixed_const(1);
712 	if ((wm->vsc.full > a.full) ||
713 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
714 	    (wm->vtaps >= 5) ||
715 	    ((wm->vsc.full >= a.full) && wm->interlaced))
716 		max_src_lines_per_dst_line = 4;
717 	else
718 		max_src_lines_per_dst_line = 2;
719 
720 	a.full = dfixed_const(available_bandwidth);
721 	b.full = dfixed_const(wm->num_heads);
722 	a.full = dfixed_div(a, b);
723 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
724 	tmp = min(dfixed_trunc(a), tmp);
725 
726 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
727 
728 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
729 	b.full = dfixed_const(1000);
730 	c.full = dfixed_const(lb_fill_bw);
731 	b.full = dfixed_div(c, b);
732 	a.full = dfixed_div(a, b);
733 	line_fill_time = dfixed_trunc(a);
734 
735 	if (line_fill_time < wm->active_time)
736 		return latency;
737 	else
738 		return latency + (line_fill_time - wm->active_time);
739 
740 }
741 
742 /**
743  * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
744  * average and available dram bandwidth
745  *
746  * @wm: watermark calculation data
747  *
748  * Check if the display average bandwidth fits in the display
749  * dram bandwidth (CIK).
750  * Used for display watermark bandwidth calculations
751  * Returns true if the display fits, false if not.
752  */
753 static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
754 {
755 	if (dce_v6_0_average_bandwidth(wm) <=
756 	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
757 		return true;
758 	else
759 		return false;
760 }
761 
762 /**
763  * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
764  * average and available bandwidth
765  *
766  * @wm: watermark calculation data
767  *
768  * Check if the display average bandwidth fits in the display
769  * available bandwidth (CIK).
770  * Used for display watermark bandwidth calculations
771  * Returns true if the display fits, false if not.
772  */
773 static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
774 {
775 	if (dce_v6_0_average_bandwidth(wm) <=
776 	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
777 		return true;
778 	else
779 		return false;
780 }
781 
782 /**
783  * dce_v6_0_check_latency_hiding - check latency hiding
784  *
785  * @wm: watermark calculation data
786  *
787  * Check latency hiding (CIK).
788  * Used for display watermark bandwidth calculations
789  * Returns true if the display fits, false if not.
790  */
791 static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
792 {
793 	u32 lb_partitions = wm->lb_size / wm->src_width;
794 	u32 line_time = wm->active_time + wm->blank_time;
795 	u32 latency_tolerant_lines;
796 	u32 latency_hiding;
797 	fixed20_12 a;
798 
799 	a.full = dfixed_const(1);
800 	if (wm->vsc.full > a.full)
801 		latency_tolerant_lines = 1;
802 	else {
803 		if (lb_partitions <= (wm->vtaps + 1))
804 			latency_tolerant_lines = 1;
805 		else
806 			latency_tolerant_lines = 2;
807 	}
808 
809 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
810 
811 	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
812 		return true;
813 	else
814 		return false;
815 }
816 
817 /**
818  * dce_v6_0_program_watermarks - program display watermarks
819  *
820  * @adev: amdgpu_device pointer
821  * @amdgpu_crtc: the selected display controller
822  * @lb_size: line buffer size
823  * @num_heads: number of display controllers in use
824  *
825  * Calculate and program the display watermarks for the
826  * selected display controller (CIK).
827  */
828 static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
829 					struct amdgpu_crtc *amdgpu_crtc,
830 					u32 lb_size, u32 num_heads)
831 {
832 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
833 	struct dce6_wm_params wm_low, wm_high;
834 	u32 dram_channels;
835 	u32 active_time;
836 	u32 line_time = 0;
837 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
838 	u32 priority_a_mark = 0, priority_b_mark = 0;
839 	u32 priority_a_cnt = PRIORITY_OFF;
840 	u32 priority_b_cnt = PRIORITY_OFF;
841 	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
842 	fixed20_12 a, b, c;
843 
844 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
845 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
846 					    (u32)mode->clock);
847 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
848 					  (u32)mode->clock);
849 		line_time = min_t(u32, line_time, 65535);
850 		priority_a_cnt = 0;
851 		priority_b_cnt = 0;
852 
853 		dram_channels = si_get_number_of_dram_channels(adev);
854 
855 		/* watermark for high clocks */
856 		if (adev->pm.dpm_enabled) {
857 			wm_high.yclk =
858 				amdgpu_dpm_get_mclk(adev, false) * 10;
859 			wm_high.sclk =
860 				amdgpu_dpm_get_sclk(adev, false) * 10;
861 		} else {
862 			wm_high.yclk = adev->pm.current_mclk * 10;
863 			wm_high.sclk = adev->pm.current_sclk * 10;
864 		}
865 
866 		wm_high.disp_clk = mode->clock;
867 		wm_high.src_width = mode->crtc_hdisplay;
868 		wm_high.active_time = active_time;
869 		wm_high.blank_time = line_time - wm_high.active_time;
870 		wm_high.interlaced = false;
871 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
872 			wm_high.interlaced = true;
873 		wm_high.vsc = amdgpu_crtc->vsc;
874 		wm_high.vtaps = 1;
875 		if (amdgpu_crtc->rmx_type != RMX_OFF)
876 			wm_high.vtaps = 2;
877 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
878 		wm_high.lb_size = lb_size;
879 		wm_high.dram_channels = dram_channels;
880 		wm_high.num_heads = num_heads;
881 
882 		/* watermark for low clocks */
883 		if (adev->pm.dpm_enabled) {
884 			wm_low.yclk =
885 				amdgpu_dpm_get_mclk(adev, true) * 10;
886 			wm_low.sclk =
887 				amdgpu_dpm_get_sclk(adev, true) * 10;
888 		} else {
889 			wm_low.yclk = adev->pm.current_mclk * 10;
890 			wm_low.sclk = adev->pm.current_sclk * 10;
891 		}
892 
893 		wm_low.disp_clk = mode->clock;
894 		wm_low.src_width = mode->crtc_hdisplay;
895 		wm_low.active_time = active_time;
896 		wm_low.blank_time = line_time - wm_low.active_time;
897 		wm_low.interlaced = false;
898 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
899 			wm_low.interlaced = true;
900 		wm_low.vsc = amdgpu_crtc->vsc;
901 		wm_low.vtaps = 1;
902 		if (amdgpu_crtc->rmx_type != RMX_OFF)
903 			wm_low.vtaps = 2;
904 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
905 		wm_low.lb_size = lb_size;
906 		wm_low.dram_channels = dram_channels;
907 		wm_low.num_heads = num_heads;
908 
909 		/* set for high clocks */
910 		latency_watermark_a = min_t(u32, dce_v6_0_latency_watermark(&wm_high), 65535);
911 		/* set for low clocks */
912 		latency_watermark_b = min_t(u32, dce_v6_0_latency_watermark(&wm_low), 65535);
913 
914 		/* possibly force display priority to high */
915 		/* should really do this at mode validation time... */
916 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
917 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
918 		    !dce_v6_0_check_latency_hiding(&wm_high) ||
919 		    (adev->mode_info.disp_priority == 2)) {
920 			DRM_DEBUG_KMS("force priority to high\n");
921 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
922 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
923 		}
924 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
925 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
926 		    !dce_v6_0_check_latency_hiding(&wm_low) ||
927 		    (adev->mode_info.disp_priority == 2)) {
928 			DRM_DEBUG_KMS("force priority to high\n");
929 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
930 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
931 		}
932 
933 		a.full = dfixed_const(1000);
934 		b.full = dfixed_const(mode->clock);
935 		b.full = dfixed_div(b, a);
936 		c.full = dfixed_const(latency_watermark_a);
937 		c.full = dfixed_mul(c, b);
938 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
939 		c.full = dfixed_div(c, a);
940 		a.full = dfixed_const(16);
941 		c.full = dfixed_div(c, a);
942 		priority_a_mark = dfixed_trunc(c);
943 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
944 
945 		a.full = dfixed_const(1000);
946 		b.full = dfixed_const(mode->clock);
947 		b.full = dfixed_div(b, a);
948 		c.full = dfixed_const(latency_watermark_b);
949 		c.full = dfixed_mul(c, b);
950 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
951 		c.full = dfixed_div(c, a);
952 		a.full = dfixed_const(16);
953 		c.full = dfixed_div(c, a);
954 		priority_b_mark = dfixed_trunc(c);
955 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
956 
957 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
958 	}
959 
960 	/* select wm A */
961 	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
962 	tmp = arb_control3;
963 	tmp &= ~LATENCY_WATERMARK_MASK(3);
964 	tmp |= LATENCY_WATERMARK_MASK(1);
965 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
966 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
967 	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
968 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
969 	/* select wm B */
970 	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
971 	tmp &= ~LATENCY_WATERMARK_MASK(3);
972 	tmp |= LATENCY_WATERMARK_MASK(2);
973 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
974 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
975 	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
976 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
977 	/* restore original selection */
978 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
979 
980 	/* write the priority marks */
981 	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
982 	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
983 
984 	/* save values for DPM */
985 	amdgpu_crtc->line_time = line_time;
986 	amdgpu_crtc->wm_high = latency_watermark_a;
987 
988 	/* Save number of lines the linebuffer leads before the scanout */
989 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
990 }
991 
992 /* watermark setup */
993 static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
994 				   struct amdgpu_crtc *amdgpu_crtc,
995 				   struct drm_display_mode *mode,
996 				   struct drm_display_mode *other_mode)
997 {
998 	u32 tmp, buffer_alloc, i;
999 	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
1000 	/*
1001 	 * Line Buffer Setup
1002 	 * There are 3 line buffers, each one shared by 2 display controllers.
1003 	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1004 	 * the display controllers.  The partitioning is done via one of four
1005 	 * preset allocations specified in bits 21:20:
1006 	 *  0 - half lb
1007 	 *  2 - whole lb, other crtc must be disabled
1008 	 */
1009 	/* this can get tricky if we have two large displays on a paired group
1010 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1011 	 * non-linked crtcs for maximum line buffer allocation.
1012 	 */
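	/*
	 * With both crtcs of a pair active each one gets half the line
	 * buffer (the 4096 * 2 case below); a single active crtc gets the
	 * whole buffer (8192 * 2).
	 */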
1013 	if (amdgpu_crtc->base.enabled && mode) {
1014 		if (other_mode) {
1015 			tmp = 0; /* 1/2 */
1016 			buffer_alloc = 1;
1017 		} else {
1018 			tmp = 2; /* whole */
1019 			buffer_alloc = 2;
1020 		}
1021 	} else {
1022 		tmp = 0;
1023 		buffer_alloc = 0;
1024 	}
1025 
1026 	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1027 	       DC_LB_MEMORY_CONFIG(tmp));
1028 
1029 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1030 	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1031 	for (i = 0; i < adev->usec_timeout; i++) {
1032 		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1033 		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1034 			break;
1035 		udelay(1);
1036 	}
1037 
1038 	if (amdgpu_crtc->base.enabled && mode) {
1039 		switch (tmp) {
1040 		case 0:
1041 		default:
1042 			return 4096 * 2;
1043 		case 2:
1044 			return 8192 * 2;
1045 		}
1046 	}
1047 
1048 	/* controller not enabled, so no lb used */
1049 	return 0;
1050 }
1051 
1052 
1053 /**
1054  * dce_v6_0_bandwidth_update - program display watermarks
1055  *
1056  * @adev: amdgpu_device pointer
1057  *
1058  * Calculate and program the display watermarks and line
1059  * buffer allocation (CIK).
1060  */
1061 static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1062 {
1063 	struct drm_display_mode *mode0 = NULL;
1064 	struct drm_display_mode *mode1 = NULL;
1065 	u32 num_heads = 0, lb_size;
1066 	int i;
1067 
1068 	if (!adev->mode_info.mode_config_initialized)
1069 		return;
1070 
1071 	amdgpu_display_update_priority(adev);
1072 
1073 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1074 		if (adev->mode_info.crtcs[i]->base.enabled)
1075 			num_heads++;
1076 	}
1077 	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1078 		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1079 		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1080 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1081 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1082 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1083 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1084 	}
1085 }
1086 
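/**
 * dce_v6_0_audio_get_connected_pins - update audio pin connectivity
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the port connectivity field of each audio pin's default
 * configuration register and updates the pin's connected flag.
 */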
1087 static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1088 {
1089 	int i;
1090 	u32 tmp;
1091 
1092 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1093 		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
1094 				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1095 		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
1096 					PORT_CONNECTIVITY))
1097 			adev->mode_info.audio.pin[i].connected = false;
1098 		else
1099 			adev->mode_info.audio.pin[i].connected = true;
1100 	}
1101 
1102 }
1103 
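/**
 * dce_v6_0_audio_get_pin - find a connected audio pin
 *
 * @adev: amdgpu_device pointer
 *
 * Refreshes the pin connectivity state and returns the first
 * connected audio pin, or NULL if none is connected.
 */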
1104 static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1105 {
1106 	int i;
1107 
1108 	dce_v6_0_audio_get_connected_pins(adev);
1109 
1110 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1111 		if (adev->mode_info.audio.pin[i].connected)
1112 			return &adev->mode_info.audio.pin[i];
1113 	}
1114 	DRM_ERROR("No connected audio pins found!\n");
1115 	return NULL;
1116 }
1117 
1118 static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
1119 {
1120 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1121 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1122 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1123 
1124 	if (!dig || !dig->afmt || !dig->afmt->pin)
1125 		return;
1126 
1127 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
1128 	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
1129 		             dig->afmt->pin->id));
1130 }
1131 
1132 static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1133 						struct drm_display_mode *mode)
1134 {
1135 	struct drm_device *dev = encoder->dev;
1136 	struct amdgpu_device *adev = drm_to_adev(dev);
1137 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1138 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1139 	struct drm_connector *connector;
1140 	struct drm_connector_list_iter iter;
1141 	struct amdgpu_connector *amdgpu_connector = NULL;
1142 	int interlace = 0;
1143 	u32 tmp;
1144 
1145 	drm_connector_list_iter_begin(dev, &iter);
1146 	drm_for_each_connector_iter(connector, &iter) {
1147 		if (connector->encoder == encoder) {
1148 			amdgpu_connector = to_amdgpu_connector(connector);
1149 			break;
1150 		}
1151 	}
1152 	drm_connector_list_iter_end(&iter);
1153 
1154 	if (!amdgpu_connector) {
1155 		DRM_ERROR("Couldn't find encoder's connector\n");
1156 		return;
1157 	}
1158 
1159 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1160 		interlace = 1;
1161 
1162 	if (connector->latency_present[interlace]) {
1163 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1164 				VIDEO_LIPSYNC, connector->video_latency[interlace]);
1165 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1166 				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1167 	} else {
1168 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1169 				VIDEO_LIPSYNC, 0);
1170 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1171 				AUDIO_LIPSYNC, 0);
1172 	}
1173 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1174 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1175 }
1176 
1177 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1178 {
1179 	struct drm_device *dev = encoder->dev;
1180 	struct amdgpu_device *adev = drm_to_adev(dev);
1181 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1182 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1183 	struct drm_connector *connector;
1184 	struct drm_connector_list_iter iter;
1185 	struct amdgpu_connector *amdgpu_connector = NULL;
1186 	u8 *sadb = NULL;
1187 	int sad_count;
1188 	u32 tmp;
1189 
1190 	drm_connector_list_iter_begin(dev, &iter);
1191 	drm_for_each_connector_iter(connector, &iter) {
1192 		if (connector->encoder == encoder) {
1193 			amdgpu_connector = to_amdgpu_connector(connector);
1194 			break;
1195 		}
1196 	}
1197 	drm_connector_list_iter_end(&iter);
1198 
1199 	if (!amdgpu_connector) {
1200 		DRM_ERROR("Couldn't find encoder's connector\n");
1201 		return;
1202 	}
1203 
1204 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1205 	if (sad_count < 0) {
1206 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1207 		sad_count = 0;
1208 	}
1209 
1210 	/* program the speaker allocation */
1211 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1212 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1213 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1214 			HDMI_CONNECTION, 0);
1215 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1216 			DP_CONNECTION, 0);
1217 
1218 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
1219 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1220 				DP_CONNECTION, 1);
1221 	else
1222 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1223 				HDMI_CONNECTION, 1);
1224 
1225 	if (sad_count)
1226 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1227 				SPEAKER_ALLOCATION, sadb[0]);
1228 	else
1229 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1230 				SPEAKER_ALLOCATION, 5); /* stereo */
1231 
1232 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1233 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1234 
1235 	kfree(sadb);
1236 }
1237 
1238 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1239 {
1240 	struct drm_device *dev = encoder->dev;
1241 	struct amdgpu_device *adev = drm_to_adev(dev);
1242 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1243 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1244 	struct drm_connector *connector;
1245 	struct drm_connector_list_iter iter;
1246 	struct amdgpu_connector *amdgpu_connector = NULL;
1247 	struct cea_sad *sads;
1248 	int i, sad_count;
1249 
1250 	static const u16 eld_reg_to_type[][2] = {
1251 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1252 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1253 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1254 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1255 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1256 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1257 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1258 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1259 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1260 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1261 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1262 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1263 	};
1264 
1265 	drm_connector_list_iter_begin(dev, &iter);
1266 	drm_for_each_connector_iter(connector, &iter) {
1267 		if (connector->encoder == encoder) {
1268 			amdgpu_connector = to_amdgpu_connector(connector);
1269 			break;
1270 		}
1271 	}
1272 	drm_connector_list_iter_end(&iter);
1273 
1274 	if (!amdgpu_connector) {
1275 		DRM_ERROR("Couldn't find encoder's connector\n");
1276 		return;
1277 	}
1278 
1279 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1280 	if (sad_count < 0)
1281 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1282 	if (sad_count <= 0)
1283 		return;
1284 
1285 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1286 		u32 tmp = 0;
1287 		u8 stereo_freqs = 0;
1288 		int max_channels = -1;
1289 		int j;
1290 
1291 		for (j = 0; j < sad_count; j++) {
1292 			struct cea_sad *sad = &sads[j];
1293 
1294 			if (sad->format == eld_reg_to_type[i][1]) {
1295 				if (sad->channels > max_channels) {
1296 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1297 							MAX_CHANNELS, sad->channels);
1298 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1299 							DESCRIPTOR_BYTE_2, sad->byte2);
1300 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1301 							SUPPORTED_FREQUENCIES, sad->freq);
1302 					max_channels = sad->channels;
1303 				}
1304 
1305 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1306 					stereo_freqs |= sad->freq;
1307 				else
1308 					break;
1309 			}
1310 		}
1311 
1312 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1313 				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1314 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1315 	}
1316 
1317 	kfree(sads);
1318 
1319 }
1320 
1321 static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1322 				  struct amdgpu_audio_pin *pin,
1323 				  bool enable)
1324 {
1325 	if (!pin)
1326 		return;
1327 
1328 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1329 			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1330 }
1331 
1332 static const u32 pin_offsets[7] =
1333 {
1334 	(0x1780 - 0x1780),
1335 	(0x1786 - 0x1780),
1336 	(0x178c - 0x1780),
1337 	(0x1792 - 0x1780),
1338 	(0x1798 - 0x1780),
1339 	(0x179d - 0x1780),
1340 	(0x17a4 - 0x1780),
1341 };
1342 
1343 static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1344 {
1345 	int i;
1346 
1347 	if (!amdgpu_audio)
1348 		return 0;
1349 
1350 	adev->mode_info.audio.enabled = true;
1351 
1352 	switch (adev->asic_type) {
1353 	case CHIP_TAHITI:
1354 	case CHIP_PITCAIRN:
1355 	case CHIP_VERDE:
1356 	default:
1357 		adev->mode_info.audio.num_pins = 6;
1358 		break;
1359 	case CHIP_OLAND:
1360 		adev->mode_info.audio.num_pins = 2;
1361 		break;
1362 	}
1363 
1364 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1365 		adev->mode_info.audio.pin[i].channels = -1;
1366 		adev->mode_info.audio.pin[i].rate = -1;
1367 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1368 		adev->mode_info.audio.pin[i].status_bits = 0;
1369 		adev->mode_info.audio.pin[i].category_code = 0;
1370 		adev->mode_info.audio.pin[i].connected = false;
1371 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1372 		adev->mode_info.audio.pin[i].id = i;
1373 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1374 	}
1375 
1376 	return 0;
1377 }
1378 
1379 static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1380 {
1381 	int i;
1382 
1383 	if (!amdgpu_audio)
1384 		return;
1385 
1386 	if (!adev->mode_info.audio.enabled)
1387 		return;
1388 
1389 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1390 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1391 
1392 	adev->mode_info.audio.enabled = false;
1393 }
1394 
1395 static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
1396 {
1397 	struct drm_device *dev = encoder->dev;
1398 	struct amdgpu_device *adev = drm_to_adev(dev);
1399 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1400 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1401 	u32 tmp;
1402 
1403 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1404 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1405 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
1406 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
1407 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1408 }
1409 
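/**
 * dce_v6_0_audio_set_acr - program the HDMI audio clock regeneration
 *
 * @encoder: encoder to configure
 * @clock: pixel clock
 * @bpc: bits per color
 *
 * Programs the CTS/N value pairs for 32, 44.1 and 48 kHz audio based
 * on the pixel clock and selects the ACR source depending on the
 * color depth.
 */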
1410 static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
1411 				   uint32_t clock, int bpc)
1412 {
1413 	struct drm_device *dev = encoder->dev;
1414 	struct amdgpu_device *adev = drm_to_adev(dev);
1415 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1416 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1417 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1418 	u32 tmp;
1419 
1420 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1421 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1422 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
1423 			bpc > 8 ? 0 : 1);
1424 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1425 
1426 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1427 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1428 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1429 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1430 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1431 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1432 
1433 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1434 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1435 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1436 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1437 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1438 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1439 
1440 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1441 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1442 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1443 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1444 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1445 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1446 }
1447 
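/**
 * dce_v6_0_audio_set_avi_infoframe - program the AVI infoframe
 *
 * @encoder: encoder to configure
 * @mode: display mode in use
 *
 * Builds and packs an AVI infoframe for the mode, writes the payload
 * into the AFMT_AVI_INFO registers and sets the line on which the
 * audio infoframe is transmitted.
 */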
1448 static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
1449 					       struct drm_display_mode *mode)
1450 {
1451 	struct drm_device *dev = encoder->dev;
1452 	struct amdgpu_device *adev = drm_to_adev(dev);
1453 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1454 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1455 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1456 	struct hdmi_avi_infoframe frame;
1457 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1458 	uint8_t *payload = buffer + 3;
1459 	uint8_t *header = buffer;
1460 	ssize_t err;
1461 	u32 tmp;
1462 
1463 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1464 	if (err < 0) {
1465 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1466 		return;
1467 	}
1468 
1469 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1470 	if (err < 0) {
1471 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1472 		return;
1473 	}
1474 
1475 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1476 	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
1477 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1478 	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
1479 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1480 	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
1481 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1482 	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
1483 
1484 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1485 	/* anything other than 0 */
1486 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
1487 			HDMI_AUDIO_INFO_LINE, 2);
1488 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1489 }
1490 
1491 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1492 {
1493 	struct drm_device *dev = encoder->dev;
1494 	struct amdgpu_device *adev = drm_to_adev(dev);
1495 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1496 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1497 	u32 tmp;
1498 
1499 	/*
1500 	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
1501 	 * Express [24MHz / target pixel clock] as an exact rational
1502 	 * number (ratio of two integers).  DCCG_AUDIO_DTOx_PHASE is the
1503 	 * numerator and DCCG_AUDIO_DTOx_MODULE is the denominator.
1504 	 */
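	/*
	 * e.g. assuming a 148.5 MHz (148500 kHz) pixel clock, PHASE = 24000
	 * and MODULE = 148500 give 24000/148500 = 24 MHz / 148.5 MHz.
	 */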
1505 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1506 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1507 			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1508 	if (em == ATOM_ENCODER_MODE_HDMI) {
1509 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1510 				DCCG_AUDIO_DTO_SEL, 0);
1511 	} else if (ENCODER_MODE_IS_DP(em)) {
1512 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1513 				DCCG_AUDIO_DTO_SEL, 1);
1514 	}
1515 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1516 	if (em == ATOM_ENCODER_MODE_HDMI) {
1517 		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1518 		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1519 	} else if (ENCODER_MODE_IS_DP(em)) {
1520 		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
1521 		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
1522 	}
1523 }
1524 
1525 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1526 {
1527 	struct drm_device *dev = encoder->dev;
1528 	struct amdgpu_device *adev = drm_to_adev(dev);
1529 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1530 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1531 	u32 tmp;
1532 
1533 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1534 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1535 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1536 
1537 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1538 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1539 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1540 
1541 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1542 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1543 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1544 
1545 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1546 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1547 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1548 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1549 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1550 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1551 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1552 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1553 
1554 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1555 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1556 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1557 
1558 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1559 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1560 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1561 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1562 
1563 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1564 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1565 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1566 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1567 }
1568 
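/* Assert or deassert AVMUTE in the HDMI General Control packet. */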
1569 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1570 {
1571 	struct drm_device *dev = encoder->dev;
1572 	struct amdgpu_device *adev = drm_to_adev(dev);
1573 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1574 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1575 	u32 tmp;
1576 
1577 	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1578 	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1579 	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1580 }
1581 
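/*
 * Enable or disable HDMI audio: toggle AVI and audio infoframe
 * transmission, select the AVI infoframe line and gate the audio
 * sample packets.
 */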
1582 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1583 {
1584 	struct drm_device *dev = encoder->dev;
1585 	struct amdgpu_device *adev = drm_to_adev(dev);
1586 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1587 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1588 	u32 tmp;
1589 
1590 	if (enable) {
1591 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1592 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1593 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1594 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1595 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1596 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1597 
1598 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1599 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1600 		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1601 
1602 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1603 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1604 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1605 	} else {
1606 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1607 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1608 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1609 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1610 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1611 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1612 
1613 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1614 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1615 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1616 	}
1617 }
1618 
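/*
 * Enable or disable DP audio: gate the audio sample packets and the DP
 * secondary data path (audio sample, timestamp and infoframe packets
 * plus the secondary stream itself).
 */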
1619 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1620 {
1621 	struct drm_device *dev = encoder->dev;
1622 	struct amdgpu_device *adev = drm_to_adev(dev);
1623 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1624 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1625 	u32 tmp;
1626 
1627 	if (enable) {
1628 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1629 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1630 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1631 
1632 		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1633 		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1634 		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1635 
1636 		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1637 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1638 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1639 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1640 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1641 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1642 	} else {
1643 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1644 	}
1645 }
1646 
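/*
 * Full AFMT/audio setup for a mode set: mute and disable audio, program
 * speaker allocation, SADs, latency, the audio DTO and packet setup,
 * select the pin and AVI infoframe, then unmute and re-enable the HDMI
 * or DP audio path.
 */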
1647 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1648 				  struct drm_display_mode *mode)
1649 {
1650 	struct drm_device *dev = encoder->dev;
1651 	struct amdgpu_device *adev = drm_to_adev(dev);
1652 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1653 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1654 	struct drm_connector *connector;
1655 	struct drm_connector_list_iter iter;
1656 	struct amdgpu_connector *amdgpu_connector = NULL;
1657 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1658 	int bpc = 8;
1659 
1660 	if (!dig || !dig->afmt)
1661 		return;
1662 
1663 	drm_connector_list_iter_begin(dev, &iter);
1664 	drm_for_each_connector_iter(connector, &iter) {
1665 		if (connector->encoder == encoder) {
1666 			amdgpu_connector = to_amdgpu_connector(connector);
1667 			break;
1668 		}
1669 	}
1670 	drm_connector_list_iter_end(&iter);
1671 
1672 	if (!amdgpu_connector) {
1673 		DRM_ERROR("Couldn't find encoder's connector\n");
1674 		return;
1675 	}
1676 
1677 	if (!dig->afmt->enabled)
1678 		return;
1679 
1680 	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1681 	if (!dig->afmt->pin)
1682 		return;
1683 
1684 	if (encoder->crtc) {
1685 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1686 		bpc = amdgpu_crtc->bpc;
1687 	}
1688 
1689 	/* disable audio before setting up hw */
1690 	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1691 
1692 	dce_v6_0_audio_set_mute(encoder, true);
1693 	dce_v6_0_audio_write_speaker_allocation(encoder);
1694 	dce_v6_0_audio_write_sad_regs(encoder);
1695 	dce_v6_0_audio_write_latency_fields(encoder, mode);
1696 	if (em == ATOM_ENCODER_MODE_HDMI) {
1697 		dce_v6_0_audio_set_dto(encoder, mode->clock);
1698 		dce_v6_0_audio_set_vbi_packet(encoder);
1699 		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1700 	} else if (ENCODER_MODE_IS_DP(em)) {
1701 		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1702 	}
1703 	dce_v6_0_audio_set_packet(encoder);
1704 	dce_v6_0_audio_select_pin(encoder);
1705 	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1706 	dce_v6_0_audio_set_mute(encoder, false);
1707 	if (em == ATOM_ENCODER_MODE_HDMI) {
1708 		dce_v6_0_audio_hdmi_enable(encoder, true);
1709 	} else if (ENCODER_MODE_IS_DP(em)) {
1710 		dce_v6_0_audio_dp_enable(encoder, true);
1711 	}
1712 
1713 	/* enable audio after setting up hw */
1714 	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1715 }
1716 
1717 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1718 {
1719 	struct drm_device *dev = encoder->dev;
1720 	struct amdgpu_device *adev = drm_to_adev(dev);
1721 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1722 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1723 
1724 	if (!dig || !dig->afmt)
1725 		return;
1726 
1727 	/* already enabled: return silently instead of warning */
1728 	if (enable && dig->afmt->enabled)
1729 		return;
1730 
1731 	if (!enable && !dig->afmt->enabled)
1732 		return;
1733 
1734 	if (!enable && dig->afmt->pin) {
1735 		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1736 		dig->afmt->pin = NULL;
1737 	}
1738 
1739 	dig->afmt->enabled = enable;
1740 
1741 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1742 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1743 }
1744 
1745 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1746 {
1747 	int i, j;
1748 
1749 	for (i = 0; i < adev->mode_info.num_dig; i++)
1750 		adev->mode_info.afmt[i] = NULL;
1751 
1752 	/* DCE6 has audio blocks tied to DIG encoders */
1753 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1754 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1755 		if (adev->mode_info.afmt[i]) {
1756 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1757 			adev->mode_info.afmt[i]->id = i;
1758 		} else {
1759 			for (j = 0; j < i; j++) {
1760 				kfree(adev->mode_info.afmt[j]);
1761 				adev->mode_info.afmt[j] = NULL;
1762 			}
1763 			DRM_ERROR("Out of memory allocating afmt table\n");
1764 			return -ENOMEM;
1765 		}
1766 	}
1767 	return 0;
1768 }
1769 
1770 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1771 {
1772 	int i;
1773 
1774 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1775 		kfree(adev->mode_info.afmt[i]);
1776 		adev->mode_info.afmt[i] = NULL;
1777 	}
1778 }
1779 
1780 static const u32 vga_control_regs[6] =
1781 {
1782 	mmD1VGA_CONTROL,
1783 	mmD2VGA_CONTROL,
1784 	mmD3VGA_CONTROL,
1785 	mmD4VGA_CONTROL,
1786 	mmD5VGA_CONTROL,
1787 	mmD6VGA_CONTROL,
1788 };
1789 
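/* Enable or disable VGA mode on this crtc via bit 0 of DxVGA_CONTROL. */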
1790 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1791 {
1792 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1793 	struct drm_device *dev = crtc->dev;
1794 	struct amdgpu_device *adev = drm_to_adev(dev);
1795 	u32 vga_control;
1796 
1797 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1798 	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1799 }
1800 
1801 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1802 {
1803 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1804 	struct drm_device *dev = crtc->dev;
1805 	struct amdgpu_device *adev = drm_to_adev(dev);
1806 
1807 	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1808 }
1809 
1810 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1811 				     struct drm_framebuffer *fb,
1812 				     int x, int y, int atomic)
1813 {
1814 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1815 	struct drm_device *dev = crtc->dev;
1816 	struct amdgpu_device *adev = drm_to_adev(dev);
1817 	struct drm_framebuffer *target_fb;
1818 	struct drm_gem_object *obj;
1819 	struct amdgpu_bo *abo;
1820 	uint64_t fb_location, tiling_flags;
1821 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1822 	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1823 	u32 viewport_w, viewport_h;
1824 	int r;
1825 	bool bypass_lut = false;
1826 
1827 	/* no fb bound */
1828 	if (!atomic && !crtc->primary->fb) {
1829 		DRM_DEBUG_KMS("No FB bound\n");
1830 		return 0;
1831 	}
1832 
1833 	if (atomic)
1834 		target_fb = fb;
1835 	else
1836 		target_fb = crtc->primary->fb;
1837 
1838 	/* If atomic, assume fb object is pinned & idle & fenced and
1839 	 * just update base pointers
1840 	 */
1841 	obj = target_fb->obj[0];
1842 	abo = gem_to_amdgpu_bo(obj);
1843 	r = amdgpu_bo_reserve(abo, false);
1844 	if (unlikely(r != 0))
1845 		return r;
1846 
1847 	if (!atomic) {
1848 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1849 		if (unlikely(r != 0)) {
1850 			amdgpu_bo_unreserve(abo);
1851 			return -EINVAL;
1852 		}
1853 	}
1854 	fb_location = amdgpu_bo_gpu_offset(abo);
1855 
1856 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1857 	amdgpu_bo_unreserve(abo);
1858 
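	/* translate the fb format into GRPH depth/format, crossbar and endian swap settings */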
1859 	switch (target_fb->format->format) {
1860 	case DRM_FORMAT_C8:
1861 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1862 			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1863 		break;
1864 	case DRM_FORMAT_XRGB4444:
1865 	case DRM_FORMAT_ARGB4444:
1866 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1867 			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1868 #ifdef __BIG_ENDIAN
1869 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1870 #endif
1871 		break;
1872 	case DRM_FORMAT_XRGB1555:
1873 	case DRM_FORMAT_ARGB1555:
1874 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1875 			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1876 #ifdef __BIG_ENDIAN
1877 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1878 #endif
1879 		break;
1880 	case DRM_FORMAT_BGRX5551:
1881 	case DRM_FORMAT_BGRA5551:
1882 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1883 			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1884 #ifdef __BIG_ENDIAN
1885 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1886 #endif
1887 		break;
1888 	case DRM_FORMAT_RGB565:
1889 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1890 			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1891 #ifdef __BIG_ENDIAN
1892 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1893 #endif
1894 		break;
1895 	case DRM_FORMAT_XRGB8888:
1896 	case DRM_FORMAT_ARGB8888:
1897 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1898 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1899 #ifdef __BIG_ENDIAN
1900 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1901 #endif
1902 		break;
1903 	case DRM_FORMAT_XRGB2101010:
1904 	case DRM_FORMAT_ARGB2101010:
1905 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1906 			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1907 #ifdef __BIG_ENDIAN
1908 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1909 #endif
1910 		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1911 		bypass_lut = true;
1912 		break;
1913 	case DRM_FORMAT_BGRX1010102:
1914 	case DRM_FORMAT_BGRA1010102:
1915 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1916 			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1917 #ifdef __BIG_ENDIAN
1918 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1919 #endif
1920 		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1921 		bypass_lut = true;
1922 		break;
1923 	case DRM_FORMAT_XBGR8888:
1924 	case DRM_FORMAT_ABGR8888:
1925 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1926 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1927 		fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
1928 			   GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
1929 #ifdef __BIG_ENDIAN
1930 		fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1931 #endif
1932 		break;
1933 	default:
1934 		DRM_ERROR("Unsupported screen format %p4cc\n",
1935 			  &target_fb->format->format);
1936 		return -EINVAL;
1937 	}
1938 
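	/* translate the BO tiling flags into GRPH_CONTROL tiling fields */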
1939 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1940 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1941 
1942 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1943 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1944 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1945 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1946 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1947 
1948 		fb_format |= GRPH_NUM_BANKS(num_banks);
1949 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1950 		fb_format |= GRPH_TILE_SPLIT(tile_split);
1951 		fb_format |= GRPH_BANK_WIDTH(bankw);
1952 		fb_format |= GRPH_BANK_HEIGHT(bankh);
1953 		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1954 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1955 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1956 	}
1957 
1958 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1959 	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1960 
1961 	dce_v6_0_vga_enable(crtc, false);
1962 
1963 	/* Make sure surface address is updated at vertical blank rather than
1964 	 * horizontal blank
1965 	 */
1966 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1967 
1968 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1969 	       upper_32_bits(fb_location));
1970 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1971 	       upper_32_bits(fb_location));
1972 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1973 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1974 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1975 	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1976 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1977 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1978 
1979 	/*
1980 	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1981 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1982 	 * retain the full precision throughout the pipeline.
1983 	 */
1984 	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1985 		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1986 		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1987 
1988 	if (bypass_lut)
1989 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1990 
1991 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1992 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1993 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1994 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1995 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1996 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1997 
1998 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1999 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2000 
2001 	dce_v6_0_grph_enable(crtc, true);
2002 
2003 	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2004 		       target_fb->height);
2005 	x &= ~3;
2006 	y &= ~1;
2007 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2008 	       (x << 16) | y);
2009 	viewport_w = crtc->mode.hdisplay;
2010 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2011 
2012 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2013 	       (viewport_w << 16) | viewport_h);
2014 
2015 	/* set pageflip to happen anywhere in vblank interval */
2016 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2017 
2018 	if (!atomic && fb && fb != crtc->primary->fb) {
2019 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2020 		r = amdgpu_bo_reserve(abo, true);
2021 		if (unlikely(r != 0))
2022 			return r;
2023 		amdgpu_bo_unpin(abo);
2024 		amdgpu_bo_unreserve(abo);
2025 	}
2026 
2027 	/* Bytes per pixel may have changed */
2028 	dce_v6_0_bandwidth_update(adev);
2029 
2030 	return 0;
2031 
2032 }
2033 
2034 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2035 				    struct drm_display_mode *mode)
2036 {
2037 	struct drm_device *dev = crtc->dev;
2038 	struct amdgpu_device *adev = drm_to_adev(dev);
2039 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2040 
2041 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2042 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2043 		       INTERLEAVE_EN);
2044 	else
2045 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2046 }
2047 
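/*
 * Set up the per-crtc color pipeline in its legacy (non-DC) mode and
 * reload the 256-entry gamma LUT from crtc->gamma_store.
 */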
2048 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2049 {
2050 
2051 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2052 	struct drm_device *dev = crtc->dev;
2053 	struct amdgpu_device *adev = drm_to_adev(dev);
2054 	u16 *r, *g, *b;
2055 	int i;
2056 
2057 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2058 
2059 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2060 	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2061 		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2062 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2063 	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2064 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2065 	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2066 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2067 	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2068 		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2069 
2070 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2071 
2072 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2073 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2074 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2075 
2076 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2077 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2078 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2079 
2080 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2081 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2082 
2083 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2084 	r = crtc->gamma_store;
2085 	g = r + crtc->gamma_size;
2086 	b = g + crtc->gamma_size;
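	/*
	 * Pack each 16-bit gamma entry down to 10 bits per component and
	 * write it as a 10:10:10 word into DC_LUT_30_COLOR.
	 */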
2087 	for (i = 0; i < 256; i++) {
2088 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2089 		       ((*r++ & 0xffc0) << 14) |
2090 		       ((*g++ & 0xffc0) << 4) |
2091 		       (*b++ >> 6));
2092 	}
2093 
2094 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2095 	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2096 		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2097 		ICON_DEGAMMA_MODE(0) |
2098 		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2099 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2100 	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2101 		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2102 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2103 	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2104 		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2105 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2106 	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2107 		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2108 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2109 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2110 
2111 
2112 }
2113 
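/*
 * Map a UNIPHY encoder and link (A/B) to its DIG block: UNIPHY ->
 * DIG0/1, UNIPHY1 -> DIG2/3, UNIPHY2 -> DIG4/5, UNIPHY3 -> DIG6.
 */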
2114 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2115 {
2116 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2117 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2118 
2119 	switch (amdgpu_encoder->encoder_id) {
2120 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2121 		return dig->linkb ? 1 : 0;
2122 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2123 		return dig->linkb ? 3 : 2;
2124 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2125 		return dig->linkb ? 5 : 4;
2126 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2127 		return 6;
2128 	default:
2129 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2130 		return 0;
2131 	}
2132 }
2133 
2134 /**
2135  * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2136  *
2137  * @crtc: drm crtc
2138  *
2139  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2140  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2141  * monitors a dedicated PPLL must be used.  If a particular board has
2142  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2143  * as there is no need to program the PLL itself.  If we are not able to
2144  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2145  * avoid messing up an existing monitor.
2146  *
2147  *
2148  */
2149 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2150 {
2151 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2152 	struct drm_device *dev = crtc->dev;
2153 	struct amdgpu_device *adev = drm_to_adev(dev);
2154 	u32 pll_in_use;
2155 	int pll;
2156 
2157 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2158 		if (adev->clock.dp_extclk)
2159 			/* skip PPLL programming if using ext clock */
2160 			return ATOM_PPLL_INVALID;
2161 		else
2162 			return ATOM_PPLL0;
2163 	} else {
2164 		/* use the same PPLL for all monitors with the same clock */
2165 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2166 		if (pll != ATOM_PPLL_INVALID)
2167 			return pll;
2168 	}
2169 
2170 	/* otherwise, pick PPLL1 or PPLL2 */
2171 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2172 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2173 		return ATOM_PPLL2;
2174 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2175 		return ATOM_PPLL1;
2176 	DRM_ERROR("unable to allocate a PPLL\n");
2177 	return ATOM_PPLL_INVALID;
2178 }
2179 
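/* Lock or unlock cursor register updates so changes latch atomically. */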
2180 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2181 {
2182 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2183 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2184 	uint32_t cur_lock;
2185 
2186 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2187 	if (lock)
2188 		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2189 	else
2190 		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2191 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2192 }
2193 
2194 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2195 {
2196 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2197 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2198 
2199 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2200 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2201 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2202 
2203 
2204 }
2205 
2206 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2207 {
2208 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2209 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2210 
2211 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2212 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2213 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2214 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2215 
2216 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2217 	       CUR_CONTROL__CURSOR_EN_MASK |
2218 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2219 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2220 
2221 }
2222 
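/*
 * Program the cursor position. Negative coordinates are clamped to 0
 * and compensated for by moving the cursor hot spot instead.
 */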
2223 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2224 				       int x, int y)
2225 {
2226 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2227 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2228 	int xorigin = 0, yorigin = 0;
2229 
2230 	int w = amdgpu_crtc->cursor_width;
2231 
2232 	amdgpu_crtc->cursor_x = x;
2233 	amdgpu_crtc->cursor_y = y;
2234 
2235 	/* avivo cursors are offset into the total surface */
2236 	x += crtc->x;
2237 	y += crtc->y;
2238 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2239 
2240 	if (x < 0) {
2241 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2242 		x = 0;
2243 	}
2244 	if (y < 0) {
2245 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2246 		y = 0;
2247 	}
2248 
2249 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2250 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2251 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2252 	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2253 
2254 	return 0;
2255 }
2256 
2257 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2258 				     int x, int y)
2259 {
2260 	int ret;
2261 
2262 	dce_v6_0_lock_cursor(crtc, true);
2263 	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2264 	dce_v6_0_lock_cursor(crtc, false);
2265 
2266 	return ret;
2267 }
2268 
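/*
 * Set a new cursor BO: pin the new buffer, update position/size with
 * the cursor registers locked, then unpin and release the previous
 * cursor BO.
 */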
2269 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2270 				     struct drm_file *file_priv,
2271 				     uint32_t handle,
2272 				     uint32_t width,
2273 				     uint32_t height,
2274 				     int32_t hot_x,
2275 				     int32_t hot_y)
2276 {
2277 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2278 	struct drm_gem_object *obj;
2279 	struct amdgpu_bo *aobj;
2280 	int ret;
2281 
2282 	if (!handle) {
2283 		/* turn off cursor */
2284 		dce_v6_0_hide_cursor(crtc);
2285 		obj = NULL;
2286 		goto unpin;
2287 	}
2288 
2289 	if ((width > amdgpu_crtc->max_cursor_width) ||
2290 	    (height > amdgpu_crtc->max_cursor_height)) {
2291 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2292 		return -EINVAL;
2293 	}
2294 
2295 	obj = drm_gem_object_lookup(file_priv, handle);
2296 	if (!obj) {
2297 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2298 		return -ENOENT;
2299 	}
2300 
2301 	aobj = gem_to_amdgpu_bo(obj);
2302 	ret = amdgpu_bo_reserve(aobj, false);
2303 	if (ret != 0) {
2304 		drm_gem_object_put(obj);
2305 		return ret;
2306 	}
2307 
2308 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2309 	amdgpu_bo_unreserve(aobj);
2310 	if (ret) {
2311 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2312 		drm_gem_object_put(obj);
2313 		return ret;
2314 	}
2315 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2316 
2317 	dce_v6_0_lock_cursor(crtc, true);
2318 
2319 	if (width != amdgpu_crtc->cursor_width ||
2320 	    height != amdgpu_crtc->cursor_height ||
2321 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2322 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2323 		int x, y;
2324 
2325 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2326 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2327 
2328 		dce_v6_0_cursor_move_locked(crtc, x, y);
2329 
2330 		amdgpu_crtc->cursor_width = width;
2331 		amdgpu_crtc->cursor_height = height;
2332 		amdgpu_crtc->cursor_hot_x = hot_x;
2333 		amdgpu_crtc->cursor_hot_y = hot_y;
2334 	}
2335 
2336 	dce_v6_0_show_cursor(crtc);
2337 	dce_v6_0_lock_cursor(crtc, false);
2338 
2339 unpin:
2340 	if (amdgpu_crtc->cursor_bo) {
2341 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2342 		ret = amdgpu_bo_reserve(aobj, true);
2343 		if (likely(ret == 0)) {
2344 			amdgpu_bo_unpin(aobj);
2345 			amdgpu_bo_unreserve(aobj);
2346 		}
2347 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2348 	}
2349 
2350 	amdgpu_crtc->cursor_bo = obj;
2351 	return 0;
2352 }
2353 
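/* Restore the cursor after a mode set, if one was previously set up. */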
2354 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2355 {
2356 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2357 
2358 	if (amdgpu_crtc->cursor_bo) {
2359 		dce_v6_0_lock_cursor(crtc, true);
2360 
2361 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2362 					    amdgpu_crtc->cursor_y);
2363 
2364 		dce_v6_0_show_cursor(crtc);
2365 		dce_v6_0_lock_cursor(crtc, false);
2366 	}
2367 }
2368 
2369 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2370 				   u16 *blue, uint32_t size,
2371 				   struct drm_modeset_acquire_ctx *ctx)
2372 {
2373 	dce_v6_0_crtc_load_lut(crtc);
2374 
2375 	return 0;
2376 }
2377 
2378 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2379 {
2380 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2381 
2382 	drm_crtc_cleanup(crtc);
2383 	kfree(amdgpu_crtc);
2384 }
2385 
2386 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2387 	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2388 	.cursor_move = dce_v6_0_crtc_cursor_move,
2389 	.gamma_set = dce_v6_0_crtc_gamma_set,
2390 	.set_config = amdgpu_display_crtc_set_config,
2391 	.destroy = dce_v6_0_crtc_destroy,
2392 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2393 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2394 	.enable_vblank = amdgpu_enable_vblank_kms,
2395 	.disable_vblank = amdgpu_disable_vblank_kms,
2396 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2397 };
2398 
2399 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2400 {
2401 	struct drm_device *dev = crtc->dev;
2402 	struct amdgpu_device *adev = drm_to_adev(dev);
2403 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2404 	unsigned type;
2405 
2406 	switch (mode) {
2407 	case DRM_MODE_DPMS_ON:
2408 		amdgpu_crtc->enabled = true;
2409 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2410 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2411 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2412 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2413 						amdgpu_crtc->crtc_id);
2414 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2415 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2416 		drm_crtc_vblank_on(crtc);
2417 		dce_v6_0_crtc_load_lut(crtc);
2418 		break;
2419 	case DRM_MODE_DPMS_STANDBY:
2420 	case DRM_MODE_DPMS_SUSPEND:
2421 	case DRM_MODE_DPMS_OFF:
2422 		drm_crtc_vblank_off(crtc);
2423 		if (amdgpu_crtc->enabled)
2424 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2425 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2426 		amdgpu_crtc->enabled = false;
2427 		break;
2428 	}
2429 	/* adjust pm to dpms */
2430 	amdgpu_dpm_compute_clocks(adev);
2431 }
2432 
2433 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2434 {
2435 	/* disable crtc pair power gating before programming */
2436 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2437 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2438 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2439 }
2440 
2441 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2442 {
2443 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2444 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2445 }
2446 
2447 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2448 {
2449 
2450 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2451 	struct drm_device *dev = crtc->dev;
2452 	struct amdgpu_device *adev = drm_to_adev(dev);
2453 	struct amdgpu_atom_ss ss;
2454 	int i;
2455 
2456 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2457 	if (crtc->primary->fb) {
2458 		int r;
2459 		struct amdgpu_bo *abo;
2460 
2461 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2462 		r = amdgpu_bo_reserve(abo, true);
2463 		if (unlikely(r))
2464 			DRM_ERROR("failed to reserve abo before unpin\n");
2465 		else {
2466 			amdgpu_bo_unpin(abo);
2467 			amdgpu_bo_unreserve(abo);
2468 		}
2469 	}
2470 	/* disable the GRPH */
2471 	dce_v6_0_grph_enable(crtc, false);
2472 
2473 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2474 
2475 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2476 		if (adev->mode_info.crtcs[i] &&
2477 		    adev->mode_info.crtcs[i]->enabled &&
2478 		    i != amdgpu_crtc->crtc_id &&
2479 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2480 			/* another crtc is still using this pll;
2481 			 * don't turn it off
2482 			 */
2483 			goto done;
2484 		}
2485 	}
2486 
2487 	switch (amdgpu_crtc->pll_id) {
2488 	case ATOM_PPLL1:
2489 	case ATOM_PPLL2:
2490 		/* disable the ppll */
2491 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2492 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2493 		break;
2494 	default:
2495 		break;
2496 	}
2497 done:
2498 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2499 	amdgpu_crtc->adjusted_clock = 0;
2500 	amdgpu_crtc->encoder = NULL;
2501 	amdgpu_crtc->connector = NULL;
2502 }
2503 
2504 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2505 				  struct drm_display_mode *mode,
2506 				  struct drm_display_mode *adjusted_mode,
2507 				  int x, int y, struct drm_framebuffer *old_fb)
2508 {
2509 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2510 
2511 	if (!amdgpu_crtc->adjusted_clock)
2512 		return -EINVAL;
2513 
2514 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2515 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2516 	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2517 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2518 	amdgpu_atombios_crtc_scaler_setup(crtc);
2519 	dce_v6_0_cursor_reset(crtc);
2520 	/* update the hw mode for dpm */
2521 	amdgpu_crtc->hw_mode = *adjusted_mode;
2522 
2523 	return 0;
2524 }
2525 
2526 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2527 				     const struct drm_display_mode *mode,
2528 				     struct drm_display_mode *adjusted_mode)
2529 {
2530 
2531 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2532 	struct drm_device *dev = crtc->dev;
2533 	struct drm_encoder *encoder;
2534 
2535 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2536 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2537 		if (encoder->crtc == crtc) {
2538 			amdgpu_crtc->encoder = encoder;
2539 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2540 			break;
2541 		}
2542 	}
2543 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2544 		amdgpu_crtc->encoder = NULL;
2545 		amdgpu_crtc->connector = NULL;
2546 		return false;
2547 	}
2548 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2549 		return false;
2550 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2551 		return false;
2552 	/* pick pll */
2553 	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2554 	/* if we can't get a PPLL for a non-DP encoder, fail */
2555 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2556 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2557 		return false;
2558 
2559 	return true;
2560 }
2561 
2562 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2563 				  struct drm_framebuffer *old_fb)
2564 {
2565 	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2566 }
2567 
2568 static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2569 					 struct drm_framebuffer *fb,
2570 					 int x, int y, enum mode_set_atomic state)
2571 {
2572 	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2573 }
2574 
2575 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2576 	.dpms = dce_v6_0_crtc_dpms,
2577 	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2578 	.mode_set = dce_v6_0_crtc_mode_set,
2579 	.mode_set_base = dce_v6_0_crtc_set_base,
2580 	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2581 	.prepare = dce_v6_0_crtc_prepare,
2582 	.commit = dce_v6_0_crtc_commit,
2583 	.disable = dce_v6_0_crtc_disable,
2584 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2585 };
2586 
2587 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2588 {
2589 	struct amdgpu_crtc *amdgpu_crtc;
2590 
2591 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2592 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2593 	if (amdgpu_crtc == NULL)
2594 		return -ENOMEM;
2595 
2596 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2597 
2598 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2599 	amdgpu_crtc->crtc_id = index;
2600 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2601 
2602 	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2603 	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2604 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2605 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2606 
2607 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2608 
2609 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2610 	amdgpu_crtc->adjusted_clock = 0;
2611 	amdgpu_crtc->encoder = NULL;
2612 	amdgpu_crtc->connector = NULL;
2613 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2614 
2615 	return 0;
2616 }
2617 
2618 static int dce_v6_0_early_init(void *handle)
2619 {
2620 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2621 
2622 	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2623 	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2624 
2625 	dce_v6_0_set_display_funcs(adev);
2626 
2627 	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2628 
2629 	switch (adev->asic_type) {
2630 	case CHIP_TAHITI:
2631 	case CHIP_PITCAIRN:
2632 	case CHIP_VERDE:
2633 		adev->mode_info.num_hpd = 6;
2634 		adev->mode_info.num_dig = 6;
2635 		break;
2636 	case CHIP_OLAND:
2637 		adev->mode_info.num_hpd = 2;
2638 		adev->mode_info.num_dig = 2;
2639 		break;
2640 	default:
2641 		return -EINVAL;
2642 	}
2643 
2644 	dce_v6_0_set_irq_funcs(adev);
2645 
2646 	return 0;
2647 }
2648 
2649 static int dce_v6_0_sw_init(void *handle)
2650 {
2651 	int r, i;
2652 	bool ret;
2653 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2654 
2655 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2656 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2657 		if (r)
2658 			return r;
2659 	}
2660 
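	/* pageflip interrupts: one source id per crtc, even ids 8 through 18 */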
2661 	for (i = 8; i < 20; i += 2) {
2662 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2663 		if (r)
2664 			return r;
2665 	}
2666 
2667 	/* HPD hotplug */
2668 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2669 	if (r)
2670 		return r;
2671 
2672 	adev->mode_info.mode_config_initialized = true;
2673 
2674 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2675 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2676 	adev_to_drm(adev)->mode_config.max_width = 16384;
2677 	adev_to_drm(adev)->mode_config.max_height = 16384;
2678 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2679 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2680 	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2681 
2682 	r = amdgpu_display_modeset_create_props(adev);
2683 	if (r)
2684 		return r;
2685 
2686 	adev_to_drm(adev)->mode_config.max_width = 16384;
2687 	adev_to_drm(adev)->mode_config.max_height = 16384;
2688 
2689 	/* allocate crtcs */
2690 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2691 		r = dce_v6_0_crtc_init(adev, i);
2692 		if (r)
2693 			return r;
2694 	}
2695 
2696 	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2697 	if (ret)
2698 		amdgpu_display_print_display_setup(adev_to_drm(adev));
2699 	else
2700 		return -EINVAL;
2701 
2702 	/* setup afmt */
2703 	r = dce_v6_0_afmt_init(adev);
2704 	if (r)
2705 		return r;
2706 
2707 	r = dce_v6_0_audio_init(adev);
2708 	if (r)
2709 		return r;
2710 
2711 	/* Disable vblank IRQs aggressively for power-saving */
2712 	/* XXX: can this be enabled for DC? */
2713 	adev_to_drm(adev)->vblank_disable_immediate = true;
2714 
2715 	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2716 	if (r)
2717 		return r;
2718 
2719 	/* Pre-DCE11 */
2720 	INIT_DELAYED_WORK(&adev->hotplug_work,
2721 		  amdgpu_display_hotplug_work_func);
2722 
2723 	drm_kms_helper_poll_init(adev_to_drm(adev));
2724 
2725 	return r;
2726 }
2727 
2728 static int dce_v6_0_sw_fini(void *handle)
2729 {
2730 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2731 
2732 	kfree(adev->mode_info.bios_hardcoded_edid);
2733 
2734 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2735 
2736 	dce_v6_0_audio_fini(adev);
2737 	dce_v6_0_afmt_fini(adev);
2738 
2739 	drm_mode_config_cleanup(adev_to_drm(adev));
2740 	adev->mode_info.mode_config_initialized = false;
2741 
2742 	return 0;
2743 }
2744 
2745 static int dce_v6_0_hw_init(void *handle)
2746 {
2747 	int i;
2748 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2749 
2750 	/* disable vga render */
2751 	dce_v6_0_set_vga_render_state(adev, false);
2752 	/* init dig PHYs, disp eng pll */
2753 	amdgpu_atombios_encoder_init_dig(adev);
2754 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2755 
2756 	/* initialize hpd */
2757 	dce_v6_0_hpd_init(adev);
2758 
2759 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2760 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2761 	}
2762 
2763 	dce_v6_0_pageflip_interrupt_init(adev);
2764 
2765 	return 0;
2766 }
2767 
2768 static int dce_v6_0_hw_fini(void *handle)
2769 {
2770 	int i;
2771 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2772 
2773 	dce_v6_0_hpd_fini(adev);
2774 
2775 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2776 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2777 	}
2778 
2779 	dce_v6_0_pageflip_interrupt_fini(adev);
2780 
2781 	flush_delayed_work(&adev->hotplug_work);
2782 
2783 	return 0;
2784 }
2785 
2786 static int dce_v6_0_suspend(void *handle)
2787 {
2788 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2789 	int r;
2790 
2791 	r = amdgpu_display_suspend_helper(adev);
2792 	if (r)
2793 		return r;
2794 	adev->mode_info.bl_level =
2795 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2796 
2797 	return dce_v6_0_hw_fini(handle);
2798 }
2799 
2800 static int dce_v6_0_resume(void *handle)
2801 {
2802 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2803 	int ret;
2804 
2805 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2806 							   adev->mode_info.bl_level);
2807 
2808 	ret = dce_v6_0_hw_init(handle);
2809 
2810 	/* turn on the BL */
2811 	if (adev->mode_info.bl_encoder) {
2812 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2813 								  adev->mode_info.bl_encoder);
2814 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2815 						    bl_level);
2816 	}
2817 	if (ret)
2818 		return ret;
2819 
2820 	return amdgpu_display_resume_helper(adev);
2821 }
2822 
2823 static bool dce_v6_0_is_idle(void *handle)
2824 {
2825 	return true;
2826 }
2827 
2828 static int dce_v6_0_wait_for_idle(void *handle)
2829 {
2830 	return 0;
2831 }
2832 
2833 static int dce_v6_0_soft_reset(void *handle)
2834 {
2835 	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2836 	return 0;
2837 }
2838 
2839 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2840 						     int crtc,
2841 						     enum amdgpu_interrupt_state state)
2842 {
2843 	u32 reg_block, interrupt_mask;
2844 
2845 	if (crtc >= adev->mode_info.num_crtc) {
2846 		DRM_DEBUG("invalid crtc %d\n", crtc);
2847 		return;
2848 	}
2849 
2850 	switch (crtc) {
2851 	case 0:
2852 		reg_block = SI_CRTC0_REGISTER_OFFSET;
2853 		break;
2854 	case 1:
2855 		reg_block = SI_CRTC1_REGISTER_OFFSET;
2856 		break;
2857 	case 2:
2858 		reg_block = SI_CRTC2_REGISTER_OFFSET;
2859 		break;
2860 	case 3:
2861 		reg_block = SI_CRTC3_REGISTER_OFFSET;
2862 		break;
2863 	case 4:
2864 		reg_block = SI_CRTC4_REGISTER_OFFSET;
2865 		break;
2866 	case 5:
2867 		reg_block = SI_CRTC5_REGISTER_OFFSET;
2868 		break;
2869 	default:
2870 		DRM_DEBUG("invalid crtc %d\n", crtc);
2871 		return;
2872 	}
2873 
2874 	switch (state) {
2875 	case AMDGPU_IRQ_STATE_DISABLE:
2876 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2877 		interrupt_mask &= ~VBLANK_INT_MASK;
2878 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2879 		break;
2880 	case AMDGPU_IRQ_STATE_ENABLE:
2881 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2882 		interrupt_mask |= VBLANK_INT_MASK;
2883 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2884 		break;
2885 	default:
2886 		break;
2887 	}
2888 }
2889 
2890 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2891 						    int crtc,
2892 						    enum amdgpu_interrupt_state state)
2893 {
2894 
2895 }
2896 
2897 static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2898 					    struct amdgpu_irq_src *src,
2899 					    unsigned type,
2900 					    enum amdgpu_interrupt_state state)
2901 {
2902 	u32 dc_hpd_int_cntl;
2903 
2904 	if (type >= adev->mode_info.num_hpd) {
2905 		DRM_DEBUG("invalid hpd %d\n", type);
2906 		return 0;
2907 	}
2908 
2909 	switch (state) {
2910 	case AMDGPU_IRQ_STATE_DISABLE:
2911 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2912 		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2913 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2914 		break;
2915 	case AMDGPU_IRQ_STATE_ENABLE:
2916 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2917 		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2918 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2919 		break;
2920 	default:
2921 		break;
2922 	}
2923 
2924 	return 0;
2925 }
2926 
2927 static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2928 					     struct amdgpu_irq_src *src,
2929 					     unsigned type,
2930 					     enum amdgpu_interrupt_state state)
2931 {
2932 	switch (type) {
2933 	case AMDGPU_CRTC_IRQ_VBLANK1:
2934 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2935 		break;
2936 	case AMDGPU_CRTC_IRQ_VBLANK2:
2937 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2938 		break;
2939 	case AMDGPU_CRTC_IRQ_VBLANK3:
2940 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2941 		break;
2942 	case AMDGPU_CRTC_IRQ_VBLANK4:
2943 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2944 		break;
2945 	case AMDGPU_CRTC_IRQ_VBLANK5:
2946 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2947 		break;
2948 	case AMDGPU_CRTC_IRQ_VBLANK6:
2949 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2950 		break;
2951 	case AMDGPU_CRTC_IRQ_VLINE1:
2952 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2953 		break;
2954 	case AMDGPU_CRTC_IRQ_VLINE2:
2955 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2956 		break;
2957 	case AMDGPU_CRTC_IRQ_VLINE3:
2958 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2959 		break;
2960 	case AMDGPU_CRTC_IRQ_VLINE4:
2961 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2962 		break;
2963 	case AMDGPU_CRTC_IRQ_VLINE5:
2964 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2965 		break;
2966 	case AMDGPU_CRTC_IRQ_VLINE6:
2967 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2968 		break;
2969 	default:
2970 		break;
2971 	}
2972 	return 0;
2973 }
2974 
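/*
 * Handle a crtc interrupt: src_id 1-6 selects the crtc and src_data[0]
 * distinguishes vblank (0) from vline (1); acknowledge the status bit
 * and forward vblank events to DRM.
 */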
2975 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2976 			     struct amdgpu_irq_src *source,
2977 			     struct amdgpu_iv_entry *entry)
2978 {
2979 	unsigned crtc = entry->src_id - 1;
2980 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2981 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2982 								    crtc);
2983 
2984 	switch (entry->src_data[0]) {
2985 	case 0: /* vblank */
2986 		if (disp_int & interrupt_status_offsets[crtc].vblank)
2987 			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2988 		else
2989 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2990 
2991 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2992 			drm_handle_vblank(adev_to_drm(adev), crtc);
2993 		}
2994 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2995 		break;
2996 	case 1: /* vline */
2997 		if (disp_int & interrupt_status_offsets[crtc].vline)
2998 			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2999 		else
3000 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3001 
3002 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3003 		break;
3004 	default:
3005 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3006 		break;
3007 	}
3008 
3009 	return 0;
3010 }
3011 
3012 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3013 						 struct amdgpu_irq_src *src,
3014 						 unsigned type,
3015 						 enum amdgpu_interrupt_state state)
3016 {
3017 	u32 reg;
3018 
3019 	if (type >= adev->mode_info.num_crtc) {
3020 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3021 		return -EINVAL;
3022 	}
3023 
3024 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3025 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3026 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3027 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3028 	else
3029 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3030 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3031 
3032 	return 0;
3033 }
3034 
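/*
 * Handle a pageflip interrupt: even src_ids 8-18 map to crtcs 0-5.
 * Acknowledge the GRPH flip status, complete the pending flip work and
 * deliver the vblank event to userspace.
 */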
3035 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
3036 				 struct amdgpu_irq_src *source,
3037 				 struct amdgpu_iv_entry *entry)
3038 {
3039 	unsigned long flags;
3040 	unsigned crtc_id;
3041 	struct amdgpu_crtc *amdgpu_crtc;
3042 	struct amdgpu_flip_work *works;
3043 
3044 	crtc_id = (entry->src_id - 8) >> 1;
3045 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3046 
3047 	if (crtc_id >= adev->mode_info.num_crtc) {
3048 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3049 		return -EINVAL;
3050 	}
3051 
3052 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3053 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3054 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3055 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3056 
3057 	/* the IRQ can fire during early init, before the crtc is set up */
3058 	if (amdgpu_crtc == NULL)
3059 		return 0;
3060 
3061 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3062 	works = amdgpu_crtc->pflip_works;
3063 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3064 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3065 						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3066 						amdgpu_crtc->pflip_status,
3067 						AMDGPU_FLIP_SUBMITTED);
3068 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3069 		return 0;
3070 	}
3071 
3072 	/* page flip completed. clean up */
3073 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3074 	amdgpu_crtc->pflip_works = NULL;
3075 
3076 	/* wake up userspace */
3077 	if (works->event)
3078 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3079 
3080 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3081 
3082 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3083 	schedule_work(&works->unpin_work);
3084 
3085 	return 0;
3086 }
3087 
3088 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3089 			    struct amdgpu_irq_src *source,
3090 			    struct amdgpu_iv_entry *entry)
3091 {
3092 	uint32_t disp_int, mask, tmp;
3093 	unsigned hpd;
3094 
3095 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3096 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3097 		return 0;
3098 	}
3099 
3100 	hpd = entry->src_data[0];
3101 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3102 	mask = interrupt_status_offsets[hpd].hpd;
3103 
3104 	if (disp_int & mask) {
3105 		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3106 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3107 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3108 		schedule_delayed_work(&adev->hotplug_work, 0);
3109 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3110 	}
3111 
3112 	return 0;
3113 
3114 }
3115 
3116 static int dce_v6_0_set_clockgating_state(void *handle,
3117 					  enum amd_clockgating_state state)
3118 {
3119 	return 0;
3120 }
3121 
3122 static int dce_v6_0_set_powergating_state(void *handle,
3123 					  enum amd_powergating_state state)
3124 {
3125 	return 0;
3126 }
3127 
3128 static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3129 	.name = "dce_v6_0",
3130 	.early_init = dce_v6_0_early_init,
3131 	.late_init = NULL,
3132 	.sw_init = dce_v6_0_sw_init,
3133 	.sw_fini = dce_v6_0_sw_fini,
3134 	.hw_init = dce_v6_0_hw_init,
3135 	.hw_fini = dce_v6_0_hw_fini,
3136 	.suspend = dce_v6_0_suspend,
3137 	.resume = dce_v6_0_resume,
3138 	.is_idle = dce_v6_0_is_idle,
3139 	.wait_for_idle = dce_v6_0_wait_for_idle,
3140 	.soft_reset = dce_v6_0_soft_reset,
3141 	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3142 	.set_powergating_state = dce_v6_0_set_powergating_state,
3143 };
3144 
3145 static void
3146 dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3147 			  struct drm_display_mode *mode,
3148 			  struct drm_display_mode *adjusted_mode)
3149 {
3150 
3151 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3152 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3153 
3154 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3155 
3156 	/* need to call this here rather than in prepare() since we need some crtc info */
3157 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3158 
3159 	/* the scaler setup clears this register on some chips */
3160 	dce_v6_0_set_interleave(encoder->crtc, mode);
3161 
3162 	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3163 		dce_v6_0_afmt_enable(encoder, true);
3164 		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3165 	}
3166 }
3167 
3168 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3169 {
3170 
3171 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3172 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3173 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3174 
3175 	if ((amdgpu_encoder->active_device &
3176 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3177 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3178 	     ENCODER_OBJECT_ID_NONE)) {
3179 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3180 		if (dig) {
3181 			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3182 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3183 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3184 		}
3185 	}
3186 
3187 	amdgpu_atombios_scratch_regs_lock(adev, true);
3188 
3189 	if (connector) {
3190 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3191 
3192 		/* select the clock/data port if it uses a router */
3193 		if (amdgpu_connector->router.cd_valid)
3194 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3195 
3196 		/* turn eDP panel on for mode set */
3197 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3198 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3199 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3200 	}
3201 
3202 	/* this is needed for the pll/ss setup to work correctly in some cases */
3203 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3204 	/* set up the FMT blocks */
3205 	dce_v6_0_program_fmt(encoder);
3206 }
3207 
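/*
 * commit helper: power the encoder back on now that the CRTC is
 * configured, then release the atombios scratch register lock taken in
 * the prepare hook.
 */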
3208 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3209 {
3211 	struct drm_device *dev = encoder->dev;
3212 	struct amdgpu_device *adev = drm_to_adev(dev);
3213 
3214 	/* need to call this here as we need the crtc set up */
3215 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3216 	amdgpu_atombios_scratch_regs_lock(adev, false);
3217 }
3218 
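/*
 * disable helper: power the encoder down, disable AFMT for HDMI/DP and
 * drop the DIG encoder assignment for digital outputs.
 */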
3219 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3220 {
3222 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3223 	struct amdgpu_encoder_atom_dig *dig;
3224 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3225 
3226 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3227 
3228 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3229 		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3230 			dce_v6_0_afmt_enable(encoder, false);
3231 		dig = amdgpu_encoder->enc_priv;
3232 		dig->dig_encoder = -1;
3233 	}
3234 	amdgpu_encoder->active_device = 0;
3235 }
3236 
3237 /* these are handled by the primary encoders */
3238 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3239 {
3240 
3241 }
3242 
3243 static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3244 {
3245 
3246 }
3247 
3248 static void
3249 dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3250 		      struct drm_display_mode *mode,
3251 		      struct drm_display_mode *adjusted_mode)
3252 {
3253 
3254 }
3255 
3256 static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3257 {
3258 
3259 }
3260 
3261 static void
3262 dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3263 {
3264 
3265 }
3266 
3267 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3268 				    const struct drm_display_mode *mode,
3269 				    struct drm_display_mode *adjusted_mode)
3270 {
3271 	return true;
3272 }
3273 
3274 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3275 	.dpms = dce_v6_0_ext_dpms,
3276 	.mode_fixup = dce_v6_0_ext_mode_fixup,
3277 	.prepare = dce_v6_0_ext_prepare,
3278 	.mode_set = dce_v6_0_ext_mode_set,
3279 	.commit = dce_v6_0_ext_commit,
3280 	.disable = dce_v6_0_ext_disable,
3281 	/* no detect for TMDS/LVDS yet */
3282 };
3283 
3284 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3285 	.dpms = amdgpu_atombios_encoder_dpms,
3286 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3287 	.prepare = dce_v6_0_encoder_prepare,
3288 	.mode_set = dce_v6_0_encoder_mode_set,
3289 	.commit = dce_v6_0_encoder_commit,
3290 	.disable = dce_v6_0_encoder_disable,
3291 	.detect = amdgpu_atombios_encoder_dig_detect,
3292 };
3293 
3294 static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3295 	.dpms = amdgpu_atombios_encoder_dpms,
3296 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3297 	.prepare = dce_v6_0_encoder_prepare,
3298 	.mode_set = dce_v6_0_encoder_mode_set,
3299 	.commit = dce_v6_0_encoder_commit,
3300 	.detect = amdgpu_atombios_encoder_dac_detect,
3301 };
3302 
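/* Encoder teardown: release backlight state for LCDs and free the encoder. */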
3303 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3304 {
3305 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3306 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3307 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3308 	kfree(amdgpu_encoder->enc_priv);
3309 	drm_encoder_cleanup(encoder);
3310 	kfree(amdgpu_encoder);
3311 }
3312 
3313 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3314 	.destroy = dce_v6_0_encoder_destroy,
3315 };
3316 
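/*
 * add_encoder display callback, used while parsing the atombios object
 * tables.  If an encoder with this enum was already added, only its
 * supported device mask is extended; otherwise a new amdgpu_encoder is
 * allocated and registered with the DRM encoder type and helper funcs
 * matching its object id.
 */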
3317 static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3318 				 uint32_t encoder_enum,
3319 				 uint32_t supported_device,
3320 				 u16 caps)
3321 {
3322 	struct drm_device *dev = adev_to_drm(adev);
3323 	struct drm_encoder *encoder;
3324 	struct amdgpu_encoder *amdgpu_encoder;
3325 
3326 	/* see if we already added it */
3327 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3328 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3329 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3330 			amdgpu_encoder->devices |= supported_device;
3331 			return;
3332 		}
3334 	}
3335 
3336 	/* add a new one */
3337 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3338 	if (!amdgpu_encoder)
3339 		return;
3340 
3341 	encoder = &amdgpu_encoder->base;
3342 	switch (adev->mode_info.num_crtc) {
3343 	case 1:
3344 		encoder->possible_crtcs = 0x1;
3345 		break;
3346 	case 2:
3347 	default:
3348 		encoder->possible_crtcs = 0x3;
3349 		break;
3350 	case 4:
3351 		encoder->possible_crtcs = 0xf;
3352 		break;
3353 	case 6:
3354 		encoder->possible_crtcs = 0x3f;
3355 		break;
3356 	}
3357 
3358 	amdgpu_encoder->enc_priv = NULL;
3359 	amdgpu_encoder->encoder_enum = encoder_enum;
3360 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3361 	amdgpu_encoder->devices = supported_device;
3362 	amdgpu_encoder->rmx_type = RMX_OFF;
3363 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3364 	amdgpu_encoder->is_ext_encoder = false;
3365 	amdgpu_encoder->caps = caps;
3366 
3367 	switch (amdgpu_encoder->encoder_id) {
3368 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3369 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3370 		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3371 				 DRM_MODE_ENCODER_DAC, NULL);
3372 		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3373 		break;
3374 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3375 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3376 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3377 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3378 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3379 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3380 			amdgpu_encoder->rmx_type = RMX_FULL;
3381 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3382 					 DRM_MODE_ENCODER_LVDS, NULL);
3383 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3384 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3385 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3386 					 DRM_MODE_ENCODER_DAC, NULL);
3387 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3388 		} else {
3389 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3390 					 DRM_MODE_ENCODER_TMDS, NULL);
3391 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3392 		}
3393 		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3394 		break;
3395 	case ENCODER_OBJECT_ID_SI170B:
3396 	case ENCODER_OBJECT_ID_CH7303:
3397 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3398 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3399 	case ENCODER_OBJECT_ID_TITFP513:
3400 	case ENCODER_OBJECT_ID_VT1623:
3401 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3402 	case ENCODER_OBJECT_ID_TRAVIS:
3403 	case ENCODER_OBJECT_ID_NUTMEG:
3404 		/* these are handled by the primary encoders */
3405 		amdgpu_encoder->is_ext_encoder = true;
3406 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3407 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3408 					 DRM_MODE_ENCODER_LVDS, NULL);
3409 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3410 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3411 					 DRM_MODE_ENCODER_DAC, NULL);
3412 		else
3413 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3414 					 DRM_MODE_ENCODER_TMDS, NULL);
3415 		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3416 		break;
3417 	}
3418 }
3419 
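/* Display callbacks shared with the common amdgpu modesetting code. */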
3420 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3421 	.bandwidth_update = &dce_v6_0_bandwidth_update,
3422 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3423 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3424 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3425 	.hpd_sense = &dce_v6_0_hpd_sense,
3426 	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3427 	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3428 	.page_flip = &dce_v6_0_page_flip,
3429 	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3430 	.add_encoder = &dce_v6_0_encoder_add,
3431 	.add_connector = &amdgpu_connector_add,
3432 };
3433 
3434 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3435 {
3436 	adev->mode_info.funcs = &dce_v6_0_display_funcs;
3437 }
3438 
3439 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3440 	.set = dce_v6_0_set_crtc_interrupt_state,
3441 	.process = dce_v6_0_crtc_irq,
3442 };
3443 
3444 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3445 	.set = dce_v6_0_set_pageflip_interrupt_state,
3446 	.process = dce_v6_0_pageflip_irq,
3447 };
3448 
3449 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3450 	.set = dce_v6_0_set_hpd_interrupt_state,
3451 	.process = dce_v6_0_hpd_irq,
3452 };
3453 
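/*
 * Hook up the CRTC (vblank/vline), page flip and HPD interrupt sources
 * to the handlers defined above.
 */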
3454 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3455 {
3456 	if (adev->mode_info.num_crtc > 0)
3457 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3458 	else
3459 		adev->crtc_irq.num_types = 0;
3460 	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3461 
3462 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3463 	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3464 
3465 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3466 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3467 }
3468 
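/* IP block descriptors handed to the amdgpu core for DCE 6.0 and DCE 6.4. */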
3469 const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3470 {
3471 	.type = AMD_IP_BLOCK_TYPE_DCE,
3472 	.major = 6,
3473 	.minor = 0,
3474 	.rev = 0,
3475 	.funcs = &dce_v6_0_ip_funcs,
3476 };
3477 
3478 const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3479 {
3480 	.type = AMD_IP_BLOCK_TYPE_DCE,
3481 	.major = 6,
3482 	.minor = 4,
3483 	.rev = 0,
3484 	.funcs = &dce_v6_0_ip_funcs,
3485 };
3486