xref: /linux/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <drm/drm_edid.h>
25 #include <drm/drm_fourcc.h>
26 #include <drm/drm_modeset_helper.h>
27 #include <drm/drm_modeset_helper_vtables.h>
28 #include <drm/drm_vblank.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_pm.h"
32 #include "amdgpu_i2c.h"
33 #include "vid.h"
34 #include "atom.h"
35 #include "amdgpu_atombios.h"
36 #include "atombios_crtc.h"
37 #include "atombios_encoders.h"
38 #include "amdgpu_pll.h"
39 #include "amdgpu_connectors.h"
40 #include "amdgpu_display.h"
41 #include "dce_v10_0.h"
42 
43 #include "dce/dce_10_0_d.h"
44 #include "dce/dce_10_0_sh_mask.h"
45 #include "dce/dce_10_0_enum.h"
46 #include "oss/oss_3_0_d.h"
47 #include "oss/oss_3_0_sh_mask.h"
48 #include "gmc/gmc_8_1_d.h"
49 #include "gmc/gmc_8_1_sh_mask.h"
50 
51 #include "ivsrcid/ivsrcid_vislands30.h"
52 
53 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
54 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
55 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
56 
57 static const u32 crtc_offsets[] = {
58 	CRTC0_REGISTER_OFFSET,
59 	CRTC1_REGISTER_OFFSET,
60 	CRTC2_REGISTER_OFFSET,
61 	CRTC3_REGISTER_OFFSET,
62 	CRTC4_REGISTER_OFFSET,
63 	CRTC5_REGISTER_OFFSET,
64 	CRTC6_REGISTER_OFFSET
65 };
66 
67 static const u32 hpd_offsets[] = {
68 	HPD0_REGISTER_OFFSET,
69 	HPD1_REGISTER_OFFSET,
70 	HPD2_REGISTER_OFFSET,
71 	HPD3_REGISTER_OFFSET,
72 	HPD4_REGISTER_OFFSET,
73 	HPD5_REGISTER_OFFSET
74 };
75 
76 static const uint32_t dig_offsets[] = {
77 	DIG0_REGISTER_OFFSET,
78 	DIG1_REGISTER_OFFSET,
79 	DIG2_REGISTER_OFFSET,
80 	DIG3_REGISTER_OFFSET,
81 	DIG4_REGISTER_OFFSET,
82 	DIG5_REGISTER_OFFSET,
83 	DIG6_REGISTER_OFFSET
84 };
85 
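/* per-display-controller interrupt status register and the vblank/vline/hpd
 * mask bits to test within it
 */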
86 static const struct {
87 	uint32_t        reg;
88 	uint32_t        vblank;
89 	uint32_t        vline;
90 	uint32_t        hpd;
91 
92 } interrupt_status_offsets[] = { {
93 	.reg = mmDISP_INTERRUPT_STATUS,
94 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
95 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
96 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
97 }, {
98 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
99 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
100 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
101 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
102 }, {
103 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
104 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
105 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
106 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
107 }, {
108 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
109 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
110 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
111 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
112 }, {
113 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
114 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
115 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
116 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
117 }, {
118 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
119 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
120 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
121 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
122 } };
123 
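/* golden register triplets of { register, and-mask, or-mask }, applied below
 * via amdgpu_device_program_register_sequence()
 */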
124 static const u32 golden_settings_tonga_a11[] = {
125 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
126 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
127 	mmFBC_MISC, 0x1f311fff, 0x12300000,
128 	mmHDMI_CONTROL, 0x31000111, 0x00000011,
129 };
130 
131 static const u32 tonga_mgcg_cgcg_init[] = {
132 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
133 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
134 };
135 
136 static const u32 golden_settings_fiji_a10[] = {
137 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
138 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
139 	mmFBC_MISC, 0x1f311fff, 0x12300000,
140 	mmHDMI_CONTROL, 0x31000111, 0x00000011,
141 };
142 
143 static const u32 fiji_mgcg_cgcg_init[] = {
144 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
145 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
146 };
147 
148 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
149 {
150 	switch (adev->asic_type) {
151 	case CHIP_FIJI:
152 		amdgpu_device_program_register_sequence(adev,
153 							fiji_mgcg_cgcg_init,
154 							ARRAY_SIZE(fiji_mgcg_cgcg_init));
155 		amdgpu_device_program_register_sequence(adev,
156 							golden_settings_fiji_a10,
157 							ARRAY_SIZE(golden_settings_fiji_a10));
158 		break;
159 	case CHIP_TONGA:
160 		amdgpu_device_program_register_sequence(adev,
161 							tonga_mgcg_cgcg_init,
162 							ARRAY_SIZE(tonga_mgcg_cgcg_init));
163 		amdgpu_device_program_register_sequence(adev,
164 							golden_settings_tonga_a11,
165 							ARRAY_SIZE(golden_settings_tonga_a11));
166 		break;
167 	default:
168 		break;
169 	}
170 }
171 
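/* indexed read/write helpers for the Azalia (audio) endpoint registers,
 * serialized by audio_endpt_idx_lock
 */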
172 static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
173 				     u32 block_offset, u32 reg)
174 {
175 	unsigned long flags;
176 	u32 r;
177 
178 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
179 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
180 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
181 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
182 
183 	return r;
184 }
185 
186 static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
187 				      u32 block_offset, u32 reg, u32 v)
188 {
189 	unsigned long flags;
190 
191 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
192 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
193 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
194 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
195 }
196 
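/* read the hardware frame counter for a crtc; out-of-range crtc indices return 0 */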
197 static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
198 {
199 	if (crtc >= adev->mode_info.num_crtc)
200 		return 0;
201 	else
202 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
203 }
204 
205 static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
206 {
207 	unsigned i;
208 
209 	/* Enable pflip interrupts */
210 	for (i = 0; i < adev->mode_info.num_crtc; i++)
211 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
212 }
213 
214 static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
215 {
216 	unsigned i;
217 
218 	/* Disable pflip interrupts */
219 	for (i = 0; i < adev->mode_info.num_crtc; i++)
220 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
221 }
222 
223 /**
224  * dce_v10_0_page_flip - pageflip callback.
225  *
226  * @adev: amdgpu_device pointer
227  * @crtc_id: crtc to clean up the pageflip on
228  * @crtc_base: new address of the crtc (GPU MC address)
229  * @async: asynchronous flip
230  *
231  * Triggers the actual pageflip by updating the primary
232  * surface base address.
233  */
234 static void dce_v10_0_page_flip(struct amdgpu_device *adev,
235 				int crtc_id, u64 crtc_base, bool async)
236 {
237 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
238 	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
239 	u32 tmp;
240 
241 	/* flip at hsync for async, default is vsync */
242 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
243 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
244 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
245 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
246 	/* update pitch */
247 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
248 	       fb->pitches[0] / fb->format->cpp[0]);
249 	/* update the primary scanout address */
250 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
251 	       upper_32_bits(crtc_base));
252 	/* writing to the low address triggers the update */
253 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
254 	       lower_32_bits(crtc_base));
255 	/* post the write */
256 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
257 }
258 
259 static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
260 					u32 *vbl, u32 *position)
261 {
262 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
263 		return -EINVAL;
264 
265 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
266 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
267 
268 	return 0;
269 }
270 
271 /**
272  * dce_v10_0_hpd_sense - hpd sense callback.
273  *
274  * @adev: amdgpu_device pointer
275  * @hpd: hpd (hotplug detect) pin
276  *
277  * Checks if a digital monitor is connected (evergreen+).
278  * Returns true if connected, false if not connected.
279  */
280 static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
281 			       enum amdgpu_hpd_id hpd)
282 {
283 	bool connected = false;
284 
285 	if (hpd >= adev->mode_info.num_hpd)
286 		return connected;
287 
288 	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
289 	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
290 		connected = true;
291 
292 	return connected;
293 }
294 
295 /**
296  * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
297  *
298  * @adev: amdgpu_device pointer
299  * @hpd: hpd (hotplug detect) pin
300  *
301  * Set the polarity of the hpd pin (evergreen+).
302  */
303 static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
304 				      enum amdgpu_hpd_id hpd)
305 {
306 	u32 tmp;
307 	bool connected = dce_v10_0_hpd_sense(adev, hpd);
308 
309 	if (hpd >= adev->mode_info.num_hpd)
310 		return;
311 
312 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
313 	if (connected)
314 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
315 	else
316 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
317 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
318 }
319 
320 /**
321  * dce_v10_0_hpd_init - hpd setup callback.
322  *
323  * @adev: amdgpu_device pointer
324  *
325  * Setup the hpd pins used by the card (evergreen+).
326  * Enable the pin, set the polarity, and enable the hpd interrupts.
327  */
328 static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
329 {
330 	struct drm_device *dev = adev_to_drm(adev);
331 	struct drm_connector *connector;
332 	struct drm_connector_list_iter iter;
333 	u32 tmp;
334 
335 	drm_connector_list_iter_begin(dev, &iter);
336 	drm_for_each_connector_iter(connector, &iter) {
337 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
338 
339 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
340 			continue;
341 
342 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
343 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
344 			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
345 			 * aux dp channel on imac; this helps (but does not completely fix)
346 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
347 			 * and also avoids interrupt storms during dpms.
348 			 */
349 			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
350 			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
351 			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
352 			continue;
353 		}
354 
355 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
356 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
357 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
358 
359 		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
360 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
361 				    DC_HPD_CONNECT_INT_DELAY,
362 				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
363 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
364 				    DC_HPD_DISCONNECT_INT_DELAY,
365 				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
366 		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
367 
368 		dce_v10_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
369 		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
370 		amdgpu_irq_get(adev, &adev->hpd_irq,
371 			       amdgpu_connector->hpd.hpd);
372 	}
373 	drm_connector_list_iter_end(&iter);
374 }
375 
376 /**
377  * dce_v10_0_hpd_fini - hpd tear down callback.
378  *
379  * @adev: amdgpu_device pointer
380  *
381  * Tear down the hpd pins used by the card (evergreen+).
382  * Disable the hpd interrupts.
383  */
384 static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
385 {
386 	struct drm_device *dev = adev_to_drm(adev);
387 	struct drm_connector *connector;
388 	struct drm_connector_list_iter iter;
389 	u32 tmp;
390 
391 	drm_connector_list_iter_begin(dev, &iter);
392 	drm_for_each_connector_iter(connector, &iter) {
393 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
394 
395 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
396 			continue;
397 
398 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
399 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
400 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
401 
402 		amdgpu_irq_put(adev, &adev->hpd_irq,
403 			       amdgpu_connector->hpd.hpd);
404 	}
405 	drm_connector_list_iter_end(&iter);
406 }
407 
408 static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
409 {
410 	return mmDC_GPIO_HPD_A;
411 }
412 
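/* a display is considered hung if any enabled crtc's H/V counter fails to
 * advance across 10 samples taken 100 us apart
 */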
413 static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
414 {
415 	u32 crtc_hung = 0;
416 	u32 crtc_status[6];
417 	u32 i, j, tmp;
418 
419 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
420 		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
421 		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
422 			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
423 			crtc_hung |= (1 << i);
424 		}
425 	}
426 
427 	for (j = 0; j < 10; j++) {
428 		for (i = 0; i < adev->mode_info.num_crtc; i++) {
429 			if (crtc_hung & (1 << i)) {
430 				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
431 				if (tmp != crtc_status[i])
432 					crtc_hung &= ~(1 << i);
433 			}
434 		}
435 		if (crtc_hung == 0)
436 			return false;
437 		udelay(100);
438 	}
439 
440 	return true;
441 }
442 
443 static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
444 					   bool render)
445 {
446 	u32 tmp;
447 
448 	/* lock out or allow access through the VGA aperture */
449 	tmp = RREG32(mmVGA_HDP_CONTROL);
450 	if (render)
451 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
452 	else
453 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
454 	WREG32(mmVGA_HDP_CONTROL, tmp);
455 
456 	/* enable or disable VGA render */
457 	tmp = RREG32(mmVGA_RENDER_CONTROL);
458 	if (render)
459 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
460 	else
461 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
462 	WREG32(mmVGA_RENDER_CONTROL, tmp);
463 }
464 
465 static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
466 {
467 	int num_crtc = 0;
468 
469 	switch (adev->asic_type) {
470 	case CHIP_FIJI:
471 	case CHIP_TONGA:
472 		num_crtc = 6;
473 		break;
474 	default:
475 		num_crtc = 0;
476 	}
477 	return num_crtc;
478 }
479 
480 void dce_v10_0_disable_dce(struct amdgpu_device *adev)
481 {
482 	/* Disable VGA render and any enabled crtcs, if the asic has a DCE engine */
483 	if (amdgpu_atombios_has_dce_engine_info(adev)) {
484 		u32 tmp;
485 		int crtc_enabled, i;
486 
487 		dce_v10_0_set_vga_render_state(adev, false);
488 
489 		/* Disable the crtcs */
490 		for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
491 			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
492 									 CRTC_CONTROL, CRTC_MASTER_EN);
493 			if (crtc_enabled) {
494 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
495 				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
496 				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
497 				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
498 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
499 			}
500 		}
501 	}
502 }
503 
504 static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
505 {
506 	struct drm_device *dev = encoder->dev;
507 	struct amdgpu_device *adev = drm_to_adev(dev);
508 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
509 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
510 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
511 	int bpc = 0;
512 	u32 tmp = 0;
513 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
514 
515 	if (connector) {
516 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
517 		bpc = amdgpu_connector_get_monitor_bpc(connector);
518 		dither = amdgpu_connector->dither;
519 	}
520 
521 	/* LVDS/eDP FMT is set up by atom */
522 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
523 		return;
524 
525 	/* not needed for analog */
526 	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
527 	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
528 		return;
529 
530 	if (bpc == 0)
531 		return;
532 
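	/* build FMT_BIT_DEPTH_CONTROL: either enable dithering or simple
	 * truncation down to the monitor's bit depth
	 */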
533 	switch (bpc) {
534 	case 6:
535 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
536 			/* XXX sort out optimal dither settings */
537 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
538 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
539 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
540 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
541 		} else {
542 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
543 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
544 		}
545 		break;
546 	case 8:
547 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
548 			/* XXX sort out optimal dither settings */
549 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
550 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
551 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
552 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
553 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
554 		} else {
555 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
556 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
557 		}
558 		break;
559 	case 10:
560 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
561 			/* XXX sort out optimal dither settings */
562 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
563 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
564 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
565 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
566 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
567 		} else {
568 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
569 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
570 		}
571 		break;
572 	default:
573 		/* not needed */
574 		break;
575 	}
576 
577 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
578 }
579 
580 
581 /* display watermark setup */
582 /**
583  * dce_v10_0_line_buffer_adjust - Set up the line buffer
584  *
585  * @adev: amdgpu_device pointer
586  * @amdgpu_crtc: the selected display controller
587  * @mode: the current display mode on the selected display
588  * controller
589  *
590  * Set up the line buffer allocation for
591  * the selected display controller (CIK).
592  * Returns the line buffer size in pixels.
593  */
594 static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
595 				       struct amdgpu_crtc *amdgpu_crtc,
596 				       struct drm_display_mode *mode)
597 {
598 	u32 tmp, buffer_alloc, i, mem_cfg;
599 	u32 pipe_offset = amdgpu_crtc->crtc_id;
600 	/*
601 	 * Line Buffer Setup
602 	 * There are 6 line buffers, one for each display controller.
603 	 * There are 3 partitions per LB. Select the number of partitions
604 	 * to enable based on the display width.  For display widths larger
605 	 * than 4096, you need to use 2 display controllers and combine
606 	 * them using the stereo blender.
607 	 */
608 	if (amdgpu_crtc->base.enabled && mode) {
609 		if (mode->crtc_hdisplay < 1920) {
610 			mem_cfg = 1;
611 			buffer_alloc = 2;
612 		} else if (mode->crtc_hdisplay < 2560) {
613 			mem_cfg = 2;
614 			buffer_alloc = 2;
615 		} else if (mode->crtc_hdisplay < 4096) {
616 			mem_cfg = 0;
617 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
618 		} else {
619 			DRM_DEBUG_KMS("Mode too big for LB!\n");
620 			mem_cfg = 0;
621 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
622 		}
623 	} else {
624 		mem_cfg = 1;
625 		buffer_alloc = 0;
626 	}
627 
628 	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
629 	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
630 	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
631 
632 	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
633 	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
634 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
635 
636 	for (i = 0; i < adev->usec_timeout; i++) {
637 		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
638 		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
639 			break;
640 		udelay(1);
641 	}
642 
643 	if (amdgpu_crtc->base.enabled && mode) {
644 		switch (mem_cfg) {
645 		case 0:
646 		default:
647 			return 4096 * 2;
648 		case 1:
649 			return 1920 * 2;
650 		case 2:
651 			return 2560 * 2;
652 		}
653 	}
654 
655 	/* controller not enabled, so no lb used */
656 	return 0;
657 }
658 
659 /**
660  * cik_get_number_of_dram_channels - get the number of dram channels
661  *
662  * @adev: amdgpu_device pointer
663  *
664  * Look up the number of video ram channels (CIK).
665  * Used for display watermark bandwidth calculations
666  * Returns the number of dram channels
667  */
668 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
669 {
670 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
671 
672 	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
673 	case 0:
674 	default:
675 		return 1;
676 	case 1:
677 		return 2;
678 	case 2:
679 		return 4;
680 	case 3:
681 		return 8;
682 	case 4:
683 		return 3;
684 	case 5:
685 		return 6;
686 	case 6:
687 		return 10;
688 	case 7:
689 		return 12;
690 	case 8:
691 		return 16;
692 	}
693 }
694 
695 struct dce10_wm_params {
696 	u32 dram_channels; /* number of dram channels */
697 	u32 yclk;          /* bandwidth per dram data pin in kHz */
698 	u32 sclk;          /* engine clock in kHz */
699 	u32 disp_clk;      /* display clock in kHz */
700 	u32 src_width;     /* viewport width */
701 	u32 active_time;   /* active display time in ns */
702 	u32 blank_time;    /* blank time in ns */
703 	bool interlaced;    /* mode is interlaced */
704 	fixed20_12 vsc;    /* vertical scale ratio */
705 	u32 num_heads;     /* number of active crtcs */
706 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
707 	u32 lb_size;       /* line buffer allocated to pipe */
708 	u32 vtaps;         /* vertical scaler taps */
709 };
710 
711 /**
712  * dce_v10_0_dram_bandwidth - get the dram bandwidth
713  *
714  * @wm: watermark calculation data
715  *
716  * Calculate the raw dram bandwidth (CIK).
717  * Used for display watermark bandwidth calculations
718  * Returns the dram bandwidth in MBytes/s
719  */
720 static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
721 {
722 	/* Calculate raw DRAM Bandwidth */
723 	fixed20_12 dram_efficiency; /* 0.7 */
724 	fixed20_12 yclk, dram_channels, bandwidth;
725 	fixed20_12 a;
726 
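	/* MBytes/s = (yclk [kHz] / 1000) * (dram_channels * 4) * 0.7 efficiency */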
727 	a.full = dfixed_const(1000);
728 	yclk.full = dfixed_const(wm->yclk);
729 	yclk.full = dfixed_div(yclk, a);
730 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
731 	a.full = dfixed_const(10);
732 	dram_efficiency.full = dfixed_const(7);
733 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
734 	bandwidth.full = dfixed_mul(dram_channels, yclk);
735 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
736 
737 	return dfixed_trunc(bandwidth);
738 }
739 
740 /**
741  * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
742  *
743  * @wm: watermark calculation data
744  *
745  * Calculate the dram bandwidth used for display (CIK).
746  * Used for display watermark bandwidth calculations
747  * Returns the dram bandwidth for display in MBytes/s
748  */
749 static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
750 {
751 	/* Calculate DRAM Bandwidth and the part allocated to display. */
752 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
753 	fixed20_12 yclk, dram_channels, bandwidth;
754 	fixed20_12 a;
755 
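	/* MBytes/s = (yclk [kHz] / 1000) * (dram_channels * 4) * 0.3 worst-case display allocation */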
756 	a.full = dfixed_const(1000);
757 	yclk.full = dfixed_const(wm->yclk);
758 	yclk.full = dfixed_div(yclk, a);
759 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
760 	a.full = dfixed_const(10);
761 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
762 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
763 	bandwidth.full = dfixed_mul(dram_channels, yclk);
764 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
765 
766 	return dfixed_trunc(bandwidth);
767 }
768 
769 /**
770  * dce_v10_0_data_return_bandwidth - get the data return bandwidth
771  *
772  * @wm: watermark calculation data
773  *
774  * Calculate the data return bandwidth used for display (CIK).
775  * Used for display watermark bandwidth calculations
776  * Returns the data return bandwidth in MBytes/s
777  */
778 static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
779 {
780 	/* Calculate the display Data return Bandwidth */
781 	fixed20_12 return_efficiency; /* 0.8 */
782 	fixed20_12 sclk, bandwidth;
783 	fixed20_12 a;
784 
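	/* MBytes/s = (sclk [kHz] / 1000) * 32 bytes per clock * 0.8 efficiency */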
785 	a.full = dfixed_const(1000);
786 	sclk.full = dfixed_const(wm->sclk);
787 	sclk.full = dfixed_div(sclk, a);
788 	a.full = dfixed_const(10);
789 	return_efficiency.full = dfixed_const(8);
790 	return_efficiency.full = dfixed_div(return_efficiency, a);
791 	a.full = dfixed_const(32);
792 	bandwidth.full = dfixed_mul(a, sclk);
793 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
794 
795 	return dfixed_trunc(bandwidth);
796 }
797 
798 /**
799  * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
800  *
801  * @wm: watermark calculation data
802  *
803  * Calculate the dmif bandwidth used for display (CIK).
804  * Used for display watermark bandwidth calculations
805  * Returns the dmif bandwidth in MBytes/s
806  */
807 static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
808 {
809 	/* Calculate the DMIF Request Bandwidth */
810 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
811 	fixed20_12 disp_clk, bandwidth;
812 	fixed20_12 a, b;
813 
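	/* MBytes/s = (disp_clk [kHz] / 1000) * 32 bytes per request * 0.8 efficiency */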
814 	a.full = dfixed_const(1000);
815 	disp_clk.full = dfixed_const(wm->disp_clk);
816 	disp_clk.full = dfixed_div(disp_clk, a);
817 	a.full = dfixed_const(32);
818 	b.full = dfixed_mul(a, disp_clk);
819 
820 	a.full = dfixed_const(10);
821 	disp_clk_request_efficiency.full = dfixed_const(8);
822 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
823 
824 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
825 
826 	return dfixed_trunc(bandwidth);
827 }
828 
829 /**
830  * dce_v10_0_available_bandwidth - get the min available bandwidth
831  *
832  * @wm: watermark calculation data
833  *
834  * Calculate the min available bandwidth used for display (CIK).
835  * Used for display watermark bandwidth calculations
836  * Returns the min available bandwidth in MBytes/s
837  */
838 static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
839 {
840 	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
841 	u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
842 	u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
843 	u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);
844 
845 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
846 }
847 
848 /**
849  * dce_v10_0_average_bandwidth - get the average available bandwidth
850  *
851  * @wm: watermark calculation data
852  *
853  * Calculate the average available bandwidth used for display (CIK).
854  * Used for display watermark bandwidth calculations
855  * Returns the average available bandwidth in MBytes/s
856  */
857 static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
858 {
859 	/* Calculate the display mode Average Bandwidth
860 	 * DisplayMode should contain the source and destination dimensions,
861 	 * timing, etc.
862 	 */
863 	fixed20_12 bpp;
864 	fixed20_12 line_time;
865 	fixed20_12 src_width;
866 	fixed20_12 bandwidth;
867 	fixed20_12 a;
868 
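	/* MBytes/s = src_width * bytes_per_pixel * vsc / line time [us] */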
869 	a.full = dfixed_const(1000);
870 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
871 	line_time.full = dfixed_div(line_time, a);
872 	bpp.full = dfixed_const(wm->bytes_per_pixel);
873 	src_width.full = dfixed_const(wm->src_width);
874 	bandwidth.full = dfixed_mul(src_width, bpp);
875 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
876 	bandwidth.full = dfixed_div(bandwidth, line_time);
877 
878 	return dfixed_trunc(bandwidth);
879 }
880 
881 /**
882  * dce_v10_0_latency_watermark - get the latency watermark
883  *
884  * @wm: watermark calculation data
885  *
886  * Calculate the latency watermark (CIK).
887  * Used for display watermark bandwidth calculations
888  * Returns the latency watermark in ns
889  */
890 static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
891 {
892 	/* First calculate the latency in ns */
893 	u32 mc_latency = 2000; /* 2000 ns. */
894 	u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
895 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
896 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
897 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
898 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
899 		(wm->num_heads * cursor_line_pair_return_time);
900 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
901 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
902 	u32 tmp, dmif_size = 12288;
903 	fixed20_12 a, b, c;
904 
905 	if (wm->num_heads == 0)
906 		return 0;
907 
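	/* worst-case source lines fetched per destination line: 4 when downscaling
	 * by more than 2, using 3+ vertical taps while downscaling, using 5+ taps,
	 * or interlaced with a downscale of 2 or more; otherwise 2
	 */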
908 	a.full = dfixed_const(2);
909 	b.full = dfixed_const(1);
910 	if ((wm->vsc.full > a.full) ||
911 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
912 	    (wm->vtaps >= 5) ||
913 	    ((wm->vsc.full >= a.full) && wm->interlaced))
914 		max_src_lines_per_dst_line = 4;
915 	else
916 		max_src_lines_per_dst_line = 2;
917 
918 	a.full = dfixed_const(available_bandwidth);
919 	b.full = dfixed_const(wm->num_heads);
920 	a.full = dfixed_div(a, b);
921 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
922 	tmp = min(dfixed_trunc(a), tmp);
923 
924 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
925 
926 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
927 	b.full = dfixed_const(1000);
928 	c.full = dfixed_const(lb_fill_bw);
929 	b.full = dfixed_div(c, b);
930 	a.full = dfixed_div(a, b);
931 	line_fill_time = dfixed_trunc(a);
932 
933 	if (line_fill_time < wm->active_time)
934 		return latency;
935 	else
936 		return latency + (line_fill_time - wm->active_time);
937 
938 }
939 
940 /**
941  * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
942  * average and available dram bandwidth
943  *
944  * @wm: watermark calculation data
945  *
946  * Check if the display average bandwidth fits in the display
947  * dram bandwidth (CIK).
948  * Used for display watermark bandwidth calculations
949  * Returns true if the display fits, false if not.
950  */
951 static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
952 {
953 	if (dce_v10_0_average_bandwidth(wm) <=
954 	    (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
955 		return true;
956 	else
957 		return false;
958 }
959 
960 /**
961  * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
962  * average and available bandwidth
963  *
964  * @wm: watermark calculation data
965  *
966  * Check if the display average bandwidth fits in the display
967  * available bandwidth (CIK).
968  * Used for display watermark bandwidth calculations
969  * Returns true if the display fits, false if not.
970  */
971 static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
972 {
973 	if (dce_v10_0_average_bandwidth(wm) <=
974 	    (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
975 		return true;
976 	else
977 		return false;
978 }
979 
980 /**
981  * dce_v10_0_check_latency_hiding - check latency hiding
982  *
983  * @wm: watermark calculation data
984  *
985  * Check latency hiding (CIK).
986  * Used for display watermark bandwidth calculations
987  * Returns true if the display fits, false if not.
988  */
989 static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
990 {
991 	u32 lb_partitions = wm->lb_size / wm->src_width;
992 	u32 line_time = wm->active_time + wm->blank_time;
993 	u32 latency_tolerant_lines;
994 	u32 latency_hiding;
995 	fixed20_12 a;
996 
997 	a.full = dfixed_const(1);
998 	if (wm->vsc.full > a.full)
999 		latency_tolerant_lines = 1;
1000 	else {
1001 		if (lb_partitions <= (wm->vtaps + 1))
1002 			latency_tolerant_lines = 1;
1003 		else
1004 			latency_tolerant_lines = 2;
1005 	}
1006 
1007 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1008 
1009 	if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
1010 		return true;
1011 	else
1012 		return false;
1013 }
1014 
1015 /**
1016  * dce_v10_0_program_watermarks - program display watermarks
1017  *
1018  * @adev: amdgpu_device pointer
1019  * @amdgpu_crtc: the selected display controller
1020  * @lb_size: line buffer size
1021  * @num_heads: number of display controllers in use
1022  *
1023  * Calculate and program the display watermarks for the
1024  * selected display controller (CIK).
1025  */
1026 static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1027 					struct amdgpu_crtc *amdgpu_crtc,
1028 					u32 lb_size, u32 num_heads)
1029 {
1030 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1031 	struct dce10_wm_params wm_low, wm_high;
1032 	u32 active_time;
1033 	u32 line_time = 0;
1034 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
1035 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1036 
1037 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
1038 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1039 					    (u32)mode->clock);
1040 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1041 					  (u32)mode->clock);
1042 		line_time = min_t(u32, line_time, 65535);
1043 
1044 		/* watermark for high clocks */
1045 		if (adev->pm.dpm_enabled) {
1046 			wm_high.yclk =
1047 				amdgpu_dpm_get_mclk(adev, false) * 10;
1048 			wm_high.sclk =
1049 				amdgpu_dpm_get_sclk(adev, false) * 10;
1050 		} else {
1051 			wm_high.yclk = adev->pm.current_mclk * 10;
1052 			wm_high.sclk = adev->pm.current_sclk * 10;
1053 		}
1054 
1055 		wm_high.disp_clk = mode->clock;
1056 		wm_high.src_width = mode->crtc_hdisplay;
1057 		wm_high.active_time = active_time;
1058 		wm_high.blank_time = line_time - wm_high.active_time;
1059 		wm_high.interlaced = false;
1060 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1061 			wm_high.interlaced = true;
1062 		wm_high.vsc = amdgpu_crtc->vsc;
1063 		wm_high.vtaps = 1;
1064 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1065 			wm_high.vtaps = 2;
1066 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1067 		wm_high.lb_size = lb_size;
1068 		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1069 		wm_high.num_heads = num_heads;
1070 
1071 		/* set for high clocks */
1072 		latency_watermark_a = min_t(u32, dce_v10_0_latency_watermark(&wm_high), 65535);
1073 
1074 		/* possibly force display priority to high */
1075 		/* should really do this at mode validation time... */
1076 		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1077 		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1078 		    !dce_v10_0_check_latency_hiding(&wm_high) ||
1079 		    (adev->mode_info.disp_priority == 2)) {
1080 			DRM_DEBUG_KMS("force priority to high\n");
1081 		}
1082 
1083 		/* watermark for low clocks */
1084 		if (adev->pm.dpm_enabled) {
1085 			wm_low.yclk =
1086 				amdgpu_dpm_get_mclk(adev, true) * 10;
1087 			wm_low.sclk =
1088 				amdgpu_dpm_get_sclk(adev, true) * 10;
1089 		} else {
1090 			wm_low.yclk = adev->pm.current_mclk * 10;
1091 			wm_low.sclk = adev->pm.current_sclk * 10;
1092 		}
1093 
1094 		wm_low.disp_clk = mode->clock;
1095 		wm_low.src_width = mode->crtc_hdisplay;
1096 		wm_low.active_time = active_time;
1097 		wm_low.blank_time = line_time - wm_low.active_time;
1098 		wm_low.interlaced = false;
1099 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1100 			wm_low.interlaced = true;
1101 		wm_low.vsc = amdgpu_crtc->vsc;
1102 		wm_low.vtaps = 1;
1103 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1104 			wm_low.vtaps = 2;
1105 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1106 		wm_low.lb_size = lb_size;
1107 		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1108 		wm_low.num_heads = num_heads;
1109 
1110 		/* set for low clocks */
1111 		latency_watermark_b = min_t(u32, dce_v10_0_latency_watermark(&wm_low), 65535);
1112 
1113 		/* possibly force display priority to high */
1114 		/* should really do this at mode validation time... */
1115 		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1116 		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1117 		    !dce_v10_0_check_latency_hiding(&wm_low) ||
1118 		    (adev->mode_info.disp_priority == 2)) {
1119 			DRM_DEBUG_KMS("force priority to high\n");
1120 		}
1121 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1122 	}
1123 
1124 	/* select wm A */
1125 	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1126 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
1127 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1128 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1129 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
1130 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1131 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1132 	/* select wm B */
1133 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
1134 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1135 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1136 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
1137 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1138 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1139 	/* restore original selection */
1140 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1141 
1142 	/* save values for DPM */
1143 	amdgpu_crtc->line_time = line_time;
1144 	amdgpu_crtc->wm_high = latency_watermark_a;
1145 	amdgpu_crtc->wm_low = latency_watermark_b;
1146 	/* Save number of lines the linebuffer leads before the scanout */
1147 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1148 }
1149 
1150 /**
1151  * dce_v10_0_bandwidth_update - program display watermarks
1152  *
1153  * @adev: amdgpu_device pointer
1154  *
1155  * Calculate and program the display watermarks and line
1156  * buffer allocation (CIK).
1157  */
1158 static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
1159 {
1160 	struct drm_display_mode *mode = NULL;
1161 	u32 num_heads = 0, lb_size;
1162 	int i;
1163 
1164 	amdgpu_display_update_priority(adev);
1165 
1166 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1167 		if (adev->mode_info.crtcs[i]->base.enabled)
1168 			num_heads++;
1169 	}
1170 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1171 		mode = &adev->mode_info.crtcs[i]->base.mode;
1172 		lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1173 		dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1174 					    lb_size, num_heads);
1175 	}
1176 }
1177 
1178 static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
1179 {
1180 	int i;
1181 	u32 offset, tmp;
1182 
1183 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1184 		offset = adev->mode_info.audio.pin[i].offset;
1185 		tmp = RREG32_AUDIO_ENDPT(offset,
1186 					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1187 		if (((tmp &
1188 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1189 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1190 			adev->mode_info.audio.pin[i].connected = false;
1191 		else
1192 			adev->mode_info.audio.pin[i].connected = true;
1193 	}
1194 }
1195 
1196 static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
1197 {
1198 	int i;
1199 
1200 	dce_v10_0_audio_get_connected_pins(adev);
1201 
1202 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1203 		if (adev->mode_info.audio.pin[i].connected)
1204 			return &adev->mode_info.audio.pin[i];
1205 	}
1206 	DRM_ERROR("No connected audio pins found!\n");
1207 	return NULL;
1208 }
1209 
1210 static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1211 {
1212 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1213 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1214 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1215 	u32 tmp;
1216 
1217 	if (!dig || !dig->afmt || !dig->afmt->pin)
1218 		return;
1219 
1220 	tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1221 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1222 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1223 }
1224 
1225 static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
1226 						struct drm_display_mode *mode)
1227 {
1228 	struct drm_device *dev = encoder->dev;
1229 	struct amdgpu_device *adev = drm_to_adev(dev);
1230 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1231 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1232 	struct drm_connector *connector;
1233 	struct drm_connector_list_iter iter;
1234 	struct amdgpu_connector *amdgpu_connector = NULL;
1235 	u32 tmp;
1236 	int interlace = 0;
1237 
1238 	if (!dig || !dig->afmt || !dig->afmt->pin)
1239 		return;
1240 
1241 	drm_connector_list_iter_begin(dev, &iter);
1242 	drm_for_each_connector_iter(connector, &iter) {
1243 		if (connector->encoder == encoder) {
1244 			amdgpu_connector = to_amdgpu_connector(connector);
1245 			break;
1246 		}
1247 	}
1248 	drm_connector_list_iter_end(&iter);
1249 
1250 	if (!amdgpu_connector) {
1251 		DRM_ERROR("Couldn't find encoder's connector\n");
1252 		return;
1253 	}
1254 
1255 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1256 		interlace = 1;
1257 	if (connector->latency_present[interlace]) {
1258 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1259 				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
1260 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1261 				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1262 	} else {
1263 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1264 				    VIDEO_LIPSYNC, 0);
1265 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1266 				    AUDIO_LIPSYNC, 0);
1267 	}
1268 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1269 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1270 }
1271 
1272 static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1273 {
1274 	struct drm_device *dev = encoder->dev;
1275 	struct amdgpu_device *adev = drm_to_adev(dev);
1276 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1277 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1278 	struct drm_connector *connector;
1279 	struct drm_connector_list_iter iter;
1280 	struct amdgpu_connector *amdgpu_connector = NULL;
1281 	u32 tmp;
1282 	u8 *sadb = NULL;
1283 	int sad_count;
1284 
1285 	if (!dig || !dig->afmt || !dig->afmt->pin)
1286 		return;
1287 
1288 	drm_connector_list_iter_begin(dev, &iter);
1289 	drm_for_each_connector_iter(connector, &iter) {
1290 		if (connector->encoder == encoder) {
1291 			amdgpu_connector = to_amdgpu_connector(connector);
1292 			break;
1293 		}
1294 	}
1295 	drm_connector_list_iter_end(&iter);
1296 
1297 	if (!amdgpu_connector) {
1298 		DRM_ERROR("Couldn't find encoder's connector\n");
1299 		return;
1300 	}
1301 
1302 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
1303 	if (sad_count < 0) {
1304 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1305 		sad_count = 0;
1306 	}
1307 
1308 	/* program the speaker allocation */
1309 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1310 				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1311 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1312 			    DP_CONNECTION, 0);
1313 	/* set HDMI mode */
1314 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1315 			    HDMI_CONNECTION, 1);
1316 	if (sad_count)
1317 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1318 				    SPEAKER_ALLOCATION, sadb[0]);
1319 	else
1320 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1321 				    SPEAKER_ALLOCATION, 5); /* stereo */
1322 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1323 			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1324 
1325 	kfree(sadb);
1326 }
1327 
1328 static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
1329 {
1330 	struct drm_device *dev = encoder->dev;
1331 	struct amdgpu_device *adev = drm_to_adev(dev);
1332 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1333 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1334 	struct drm_connector *connector;
1335 	struct drm_connector_list_iter iter;
1336 	struct amdgpu_connector *amdgpu_connector = NULL;
1337 	struct cea_sad *sads;
1338 	int i, sad_count;
1339 
1340 	static const u16 eld_reg_to_type[][2] = {
1341 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1342 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1343 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1344 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1345 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1346 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1347 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1348 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1349 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1350 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1351 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1352 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1353 	};
1354 
1355 	if (!dig || !dig->afmt || !dig->afmt->pin)
1356 		return;
1357 
1358 	drm_connector_list_iter_begin(dev, &iter);
1359 	drm_for_each_connector_iter(connector, &iter) {
1360 		if (connector->encoder == encoder) {
1361 			amdgpu_connector = to_amdgpu_connector(connector);
1362 			break;
1363 		}
1364 	}
1365 	drm_connector_list_iter_end(&iter);
1366 
1367 	if (!amdgpu_connector) {
1368 		DRM_ERROR("Couldn't find encoder's connector\n");
1369 		return;
1370 	}
1371 
1372 	sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
1373 	if (sad_count < 0)
1374 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1375 	if (sad_count <= 0)
1376 		return;
1377 	BUG_ON(!sads);
1378 
1379 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1380 		u32 tmp = 0;
1381 		u8 stereo_freqs = 0;
1382 		int max_channels = -1;
1383 		int j;
1384 
1385 		for (j = 0; j < sad_count; j++) {
1386 			struct cea_sad *sad = &sads[j];
1387 
1388 			if (sad->format == eld_reg_to_type[i][1]) {
1389 				if (sad->channels > max_channels) {
1390 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1391 							    MAX_CHANNELS, sad->channels);
1392 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1393 							    DESCRIPTOR_BYTE_2, sad->byte2);
1394 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1395 							    SUPPORTED_FREQUENCIES, sad->freq);
1396 					max_channels = sad->channels;
1397 				}
1398 
1399 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1400 					stereo_freqs |= sad->freq;
1401 				else
1402 					break;
1403 			}
1404 		}
1405 
1406 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1407 				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1408 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1409 	}
1410 
1411 	kfree(sads);
1412 }
1413 
1414 static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
1415 				  struct amdgpu_audio_pin *pin,
1416 				  bool enable)
1417 {
1418 	if (!pin)
1419 		return;
1420 
1421 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1422 			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1423 }
1424 
1425 static const u32 pin_offsets[] = {
1426 	AUD0_REGISTER_OFFSET,
1427 	AUD1_REGISTER_OFFSET,
1428 	AUD2_REGISTER_OFFSET,
1429 	AUD3_REGISTER_OFFSET,
1430 	AUD4_REGISTER_OFFSET,
1431 	AUD5_REGISTER_OFFSET,
1432 	AUD6_REGISTER_OFFSET,
1433 };
1434 
1435 static int dce_v10_0_audio_init(struct amdgpu_device *adev)
1436 {
1437 	int i;
1438 
1439 	if (!amdgpu_audio)
1440 		return 0;
1441 
1442 	adev->mode_info.audio.enabled = true;
1443 
1444 	adev->mode_info.audio.num_pins = 7;
1445 
1446 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1447 		adev->mode_info.audio.pin[i].channels = -1;
1448 		adev->mode_info.audio.pin[i].rate = -1;
1449 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1450 		adev->mode_info.audio.pin[i].status_bits = 0;
1451 		adev->mode_info.audio.pin[i].category_code = 0;
1452 		adev->mode_info.audio.pin[i].connected = false;
1453 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1454 		adev->mode_info.audio.pin[i].id = i;
1455 		/* disable audio.  it will be set up later */
1456 		/* XXX remove once we switch to ip funcs */
1457 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1458 	}
1459 
1460 	return 0;
1461 }
1462 
1463 static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
1464 {
1465 	int i;
1466 
1467 	if (!amdgpu_audio)
1468 		return;
1469 
1470 	if (!adev->mode_info.audio.enabled)
1471 		return;
1472 
1473 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1474 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1475 
1476 	adev->mode_info.audio.enabled = false;
1477 }
1478 
1479 /*
1480  * update the N and CTS parameters for a given pixel clock rate
1481  */
1482 static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1483 {
1484 	struct drm_device *dev = encoder->dev;
1485 	struct amdgpu_device *adev = drm_to_adev(dev);
1486 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1487 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1488 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1489 	u32 tmp;
1490 
1491 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1492 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1493 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1494 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1495 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1496 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1497 
1498 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1499 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1500 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1501 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1502 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1503 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1504 
1505 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1506 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1507 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1508 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1509 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1510 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1511 
1512 }
1513 
1514 /*
1515  * build a HDMI Video Info Frame
1516  */
1517 static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1518 					       void *buffer, size_t size)
1519 {
1520 	struct drm_device *dev = encoder->dev;
1521 	struct amdgpu_device *adev = drm_to_adev(dev);
1522 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1523 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1524 	uint8_t *frame = buffer + 3;
1525 	uint8_t *header = buffer;
1526 
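	/* pack the infoframe payload bytes little-endian into the four AFMT_AVI_INFO
	 * registers; the version byte (header[1]) lands in the top byte of INFO3
	 */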
1527 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1528 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1529 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1530 		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1531 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1532 		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1533 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1534 		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1535 }
1536 
1537 static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1538 {
1539 	struct drm_device *dev = encoder->dev;
1540 	struct amdgpu_device *adev = drm_to_adev(dev);
1541 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1542 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1543 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1544 	u32 dto_phase = 24 * 1000;
1545 	u32 dto_modulo = clock;
1546 	u32 tmp;
1547 
1548 	if (!dig || !dig->afmt)
1549 		return;
1550 
1551 	/* XXX two dtos; generally use dto0 for hdmi */
1552 	/* Express [24MHz / target pixel clock] as an exact rational
1553 	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
1554 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1555 	 */
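	/* e.g. a 148.5 MHz mode (mode->clock == 148500) would give
	 * PHASE = 24000 and MODULE = 148500, i.e. the audio DTO ticks at
	 * 24/148.5 of the pixel rate (illustrative numbers only).
	 */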
1556 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1557 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1558 			    amdgpu_crtc->crtc_id);
1559 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1560 	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1561 	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1562 }
1563 
1564 /*
1565  * update the info frames with the data from the current display mode
1566  */
1567 static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
1568 				  struct drm_display_mode *mode)
1569 {
1570 	struct drm_device *dev = encoder->dev;
1571 	struct amdgpu_device *adev = drm_to_adev(dev);
1572 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1573 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1574 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1575 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1576 	struct hdmi_avi_infoframe frame;
1577 	ssize_t err;
1578 	u32 tmp;
1579 	int bpc = 8;
1580 
1581 	if (!dig || !dig->afmt)
1582 		return;
1583 
1584 	/* Silent, r600_hdmi_enable will raise WARN for us */
1585 	if (!dig->afmt->enabled)
1586 		return;
1587 
1588 	/* set up hdmi deep color mode general control packets, if bpc > 8 */
1589 	if (encoder->crtc) {
1590 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1591 		bpc = amdgpu_crtc->bpc;
1592 	}
1593 
1594 	/* disable audio prior to setting up hw */
1595 	dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
1596 	dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1597 
1598 	dce_v10_0_audio_set_dto(encoder, mode->clock);
1599 
1600 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1601 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1602 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
1603 
1604 	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
1605 
1606 	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
1607 	switch (bpc) {
1608 	case 0:
1609 	case 6:
1610 	case 8:
1611 	case 16:
1612 	default:
1613 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
1614 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
1615 		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1616 			  connector->name, bpc);
1617 		break;
1618 	case 10:
1619 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1620 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
1621 		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1622 			  connector->name);
1623 		break;
1624 	case 12:
1625 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1626 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
1627 		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1628 			  connector->name);
1629 		break;
1630 	}
1631 	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
1632 
1633 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1634 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
1635 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
1636 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
1637 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1638 
1639 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1640 	/* enable audio info frames (frames won't be set until audio is enabled) */
1641 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1642 	/* required for audio info values to be updated */
1643 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1644 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1645 
1646 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1647 	/* required for audio info values to be updated */
1648 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1649 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1650 
1651 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1652 	/* anything other than 0 */
1653 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
1654 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1655 
1656 	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
1657 
1658 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1659 	/* set the default audio delay */
1660 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1661 	/* should be sufficient for all audio modes and small enough for all hblanks */
1662 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1663 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1664 
1665 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1666 	/* allow 60958 channel status fields to be updated */
1667 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1668 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1669 
1670 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1671 	if (bpc > 8)
1672 		/* clear SW CTS value */
1673 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
1674 	else
1675 		/* select SW CTS value */
1676 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
1677 	/* allow hw to send ACR packets when required */
1678 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1679 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1680 
1681 	dce_v10_0_afmt_update_ACR(encoder, mode->clock);
1682 
1683 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1684 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1685 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1686 
1687 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1688 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1689 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1690 
1691 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1692 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1693 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1694 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1695 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1696 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1697 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1698 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1699 
1700 	dce_v10_0_audio_write_speaker_allocation(encoder);
1701 
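	/* 0xff in the AFMT_AUDIO_CHANNEL_ENABLE field enables all eight audio channels */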
1702 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
1703 	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1704 
1705 	dce_v10_0_afmt_audio_select_pin(encoder);
1706 	dce_v10_0_audio_write_sad_regs(encoder);
1707 	dce_v10_0_audio_write_latency_fields(encoder, mode);
1708 
1709 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1710 	if (err < 0) {
1711 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1712 		return;
1713 	}
1714 
1715 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1716 	if (err < 0) {
1717 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1718 		return;
1719 	}
1720 
1721 	dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1722 
1723 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1724 	/* enable AVI info frames */
1725 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1726 	/* required for avi info values to be updated */
1727 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1728 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1729 
1730 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1731 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1732 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1733 
1734 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1735 	/* send audio packets */
1736 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1737 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1738 
1739 	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
1740 	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
1741 	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
1742 	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
1743 
1744 	/* enable audio after setting up hw */
1745 	dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
1746 }
1747 
1748 static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1749 {
1750 	struct drm_device *dev = encoder->dev;
1751 	struct amdgpu_device *adev = drm_to_adev(dev);
1752 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1753 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1754 
1755 	if (!dig || !dig->afmt)
1756 		return;
1757 
1758 	/* Silent, r600_hdmi_enable will raise WARN for us */
1759 	if (enable && dig->afmt->enabled)
1760 		return;
1761 	if (!enable && !dig->afmt->enabled)
1762 		return;
1763 
1764 	if (!enable && dig->afmt->pin) {
1765 		dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1766 		dig->afmt->pin = NULL;
1767 	}
1768 
1769 	dig->afmt->enabled = enable;
1770 
1771 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1772 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1773 }
1774 
1775 static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
1776 {
1777 	int i;
1778 
1779 	for (i = 0; i < adev->mode_info.num_dig; i++)
1780 		adev->mode_info.afmt[i] = NULL;
1781 
1782 	/* DCE10 has audio blocks tied to DIG encoders */
1783 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1784 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1785 		if (adev->mode_info.afmt[i]) {
1786 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1787 			adev->mode_info.afmt[i]->id = i;
1788 		} else {
1789 			int j;
1790 			for (j = 0; j < i; j++) {
1791 				kfree(adev->mode_info.afmt[j]);
1792 				adev->mode_info.afmt[j] = NULL;
1793 			}
1794 			return -ENOMEM;
1795 		}
1796 	}
1797 	return 0;
1798 }
1799 
1800 static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
1801 {
1802 	int i;
1803 
1804 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1805 		kfree(adev->mode_info.afmt[i]);
1806 		adev->mode_info.afmt[i] = NULL;
1807 	}
1808 }
1809 
1810 static const u32 vga_control_regs[6] = {
1811 	mmD1VGA_CONTROL,
1812 	mmD2VGA_CONTROL,
1813 	mmD3VGA_CONTROL,
1814 	mmD4VGA_CONTROL,
1815 	mmD5VGA_CONTROL,
1816 	mmD6VGA_CONTROL,
1817 };
1818 
1819 static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
1820 {
1821 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1822 	struct drm_device *dev = crtc->dev;
1823 	struct amdgpu_device *adev = drm_to_adev(dev);
1824 	u32 vga_control;
1825 
1826 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1827 	if (enable)
1828 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1829 	else
1830 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1831 }
1832 
1833 static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
1834 {
1835 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1836 	struct drm_device *dev = crtc->dev;
1837 	struct amdgpu_device *adev = drm_to_adev(dev);
1838 
1839 	if (enable)
1840 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1841 	else
1842 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1843 }
1844 
1845 static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
1846 				     struct drm_framebuffer *fb,
1847 				     int x, int y, int atomic)
1848 {
1849 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1850 	struct drm_device *dev = crtc->dev;
1851 	struct amdgpu_device *adev = drm_to_adev(dev);
1852 	struct drm_framebuffer *target_fb;
1853 	struct drm_gem_object *obj;
1854 	struct amdgpu_bo *abo;
1855 	uint64_t fb_location, tiling_flags;
1856 	uint32_t fb_format, fb_pitch_pixels;
1857 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
1858 	u32 pipe_config;
1859 	u32 tmp, viewport_w, viewport_h;
1860 	int r;
1861 	bool bypass_lut = false;
1862 
1863 	/* no fb bound */
1864 	if (!atomic && !crtc->primary->fb) {
1865 		DRM_DEBUG_KMS("No FB bound\n");
1866 		return 0;
1867 	}
1868 
1869 	if (atomic)
1870 		target_fb = fb;
1871 	else
1872 		target_fb = crtc->primary->fb;
1873 
1874 	/* If atomic, assume fb object is pinned & idle & fenced and
1875 	 * just update base pointers
1876 	 */
1877 	obj = target_fb->obj[0];
1878 	abo = gem_to_amdgpu_bo(obj);
1879 	r = amdgpu_bo_reserve(abo, false);
1880 	if (unlikely(r != 0))
1881 		return r;
1882 
1883 	if (!atomic) {
1884 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1885 		if (unlikely(r != 0)) {
1886 			amdgpu_bo_unreserve(abo);
1887 			return -EINVAL;
1888 		}
1889 	}
1890 	fb_location = amdgpu_bo_gpu_offset(abo);
1891 
1892 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1893 	amdgpu_bo_unreserve(abo);
1894 
1895 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1896 
1897 	switch (target_fb->format->format) {
1898 	case DRM_FORMAT_C8:
1899 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
1900 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1901 		break;
1902 	case DRM_FORMAT_XRGB4444:
1903 	case DRM_FORMAT_ARGB4444:
1904 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1905 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
1906 #ifdef __BIG_ENDIAN
1907 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1908 					ENDIAN_8IN16);
1909 #endif
1910 		break;
1911 	case DRM_FORMAT_XRGB1555:
1912 	case DRM_FORMAT_ARGB1555:
1913 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1914 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1915 #ifdef __BIG_ENDIAN
1916 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1917 					ENDIAN_8IN16);
1918 #endif
1919 		break;
1920 	case DRM_FORMAT_BGRX5551:
1921 	case DRM_FORMAT_BGRA5551:
1922 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1923 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
1924 #ifdef __BIG_ENDIAN
1925 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1926 					ENDIAN_8IN16);
1927 #endif
1928 		break;
1929 	case DRM_FORMAT_RGB565:
1930 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1931 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
1932 #ifdef __BIG_ENDIAN
1933 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1934 					ENDIAN_8IN16);
1935 #endif
1936 		break;
1937 	case DRM_FORMAT_XRGB8888:
1938 	case DRM_FORMAT_ARGB8888:
1939 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1940 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1941 #ifdef __BIG_ENDIAN
1942 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1943 					ENDIAN_8IN32);
1944 #endif
1945 		break;
1946 	case DRM_FORMAT_XRGB2101010:
1947 	case DRM_FORMAT_ARGB2101010:
1948 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1949 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
1950 #ifdef __BIG_ENDIAN
1951 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1952 					ENDIAN_8IN32);
1953 #endif
1954 		/* An fb with more than 8 bpc needs to bypass the hw lut to retain precision */
1955 		bypass_lut = true;
1956 		break;
1957 	case DRM_FORMAT_BGRX1010102:
1958 	case DRM_FORMAT_BGRA1010102:
1959 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1960 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
1961 #ifdef __BIG_ENDIAN
1962 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1963 					ENDIAN_8IN32);
1964 #endif
1965 		/* An fb with more than 8 bpc needs to bypass the hw lut to retain precision */
1966 		bypass_lut = true;
1967 		break;
1968 	case DRM_FORMAT_XBGR8888:
1969 	case DRM_FORMAT_ABGR8888:
1970 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1971 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1972 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
1973 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
1974 #ifdef __BIG_ENDIAN
1975 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1976 					ENDIAN_8IN32);
1977 #endif
1978 		break;
1979 	default:
1980 		DRM_ERROR("Unsupported screen format %p4cc\n",
1981 			  &target_fb->format->format);
1982 		return -EINVAL;
1983 	}
1984 
1985 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1986 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1987 
1988 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1989 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1990 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1991 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1992 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1993 
1994 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
1995 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
1996 					  ARRAY_2D_TILED_THIN1);
1997 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
1998 					  tile_split);
1999 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
2000 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
2001 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
2002 					  mtaspect);
2003 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
2004 					  ADDR_SURF_MICRO_TILING_DISPLAY);
2005 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2006 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2007 					  ARRAY_1D_TILED_THIN1);
2008 	}
2009 
2010 	fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
2011 				  pipe_config);
2012 
2013 	dce_v10_0_vga_enable(crtc, false);
2014 
2015 	/* Make sure surface address is updated at vertical blank rather than
2016 	 * horizontal blank
2017 	 */
2018 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2019 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2020 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2021 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2022 
2023 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2024 	       upper_32_bits(fb_location));
2025 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2026 	       upper_32_bits(fb_location));
2027 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2028 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2029 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2030 	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2031 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2032 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2033 
2034 	/*
2035 	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
2036 	 * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs, to
2037 	 * retain the full precision throughout the pipeline.
2038 	 */
2039 	tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2040 	if (bypass_lut)
2041 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2042 	else
2043 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2044 	WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2045 
2046 	if (bypass_lut)
2047 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2048 
2049 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2050 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2051 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2052 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2053 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2054 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2055 
2056 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
2057 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2058 
2059 	dce_v10_0_grph_enable(crtc, true);
2060 
2061 	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2062 	       target_fb->height);
2063 
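	/* round the viewport start down to a 4-pixel / 2-line boundary
	 * (assumed to be a hw alignment requirement)
	 */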
2064 	x &= ~3;
2065 	y &= ~1;
2066 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2067 	       (x << 16) | y);
2068 	viewport_w = crtc->mode.hdisplay;
2069 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2070 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2071 	       (viewport_w << 16) | viewport_h);
2072 
2073 	/* set pageflip to happen anywhere in vblank interval */
2074 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2075 
2076 	if (!atomic && fb && fb != crtc->primary->fb) {
2077 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2078 		r = amdgpu_bo_reserve(abo, true);
2079 		if (unlikely(r != 0))
2080 			return r;
2081 		amdgpu_bo_unpin(abo);
2082 		amdgpu_bo_unreserve(abo);
2083 	}
2084 
2085 	/* Bytes per pixel may have changed */
2086 	dce_v10_0_bandwidth_update(adev);
2087 
2088 	return 0;
2089 }
2090 
2091 static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
2092 				     struct drm_display_mode *mode)
2093 {
2094 	struct drm_device *dev = crtc->dev;
2095 	struct amdgpu_device *adev = drm_to_adev(dev);
2096 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2097 	u32 tmp;
2098 
2099 	tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2100 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2101 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2102 	else
2103 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2104 	WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2105 }
2106 
2107 static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
2108 {
2109 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2110 	struct drm_device *dev = crtc->dev;
2111 	struct amdgpu_device *adev = drm_to_adev(dev);
2112 	u16 *r, *g, *b;
2113 	int i;
2114 	u32 tmp;
2115 
2116 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2117 
2118 	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2119 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
2120 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
2121 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2122 
2123 	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
2124 	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
2125 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2126 
2127 	tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
2128 	tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
2129 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2130 
2131 	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2132 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
2133 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
2134 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2135 
2136 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2137 
2138 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2139 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2140 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2141 
2142 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2143 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2144 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2145 
2146 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2147 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2148 
2149 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2150 	r = crtc->gamma_store;
2151 	g = r + crtc->gamma_size;
2152 	b = g + crtc->gamma_size;
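	/* pack each 16-bit gamma entry into the LUT's 10:10:10 format:
	 * red in bits 29:20, green in bits 19:10, blue in bits 9:0
	 */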
2153 	for (i = 0; i < 256; i++) {
2154 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2155 		       ((*r++ & 0xffc0) << 14) |
2156 		       ((*g++ & 0xffc0) << 4) |
2157 		       (*b++ >> 6));
2158 	}
2159 
2160 	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2161 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
2162 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
2163 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
2164 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2165 
2166 	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
2167 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
2168 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
2169 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2170 
2171 	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2172 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
2173 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
2174 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2175 
2176 	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2177 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
2178 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
2179 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2180 
2181 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2182 	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
2183 	/* XXX this only needs to be programmed once per crtc at startup,
2184 	 * not sure where the best place for it is
2185 	 */
2186 	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
2187 	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
2188 	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2189 }
2190 
2191 static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
2192 {
2193 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2194 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2195 
2196 	switch (amdgpu_encoder->encoder_id) {
2197 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2198 		if (dig->linkb)
2199 			return 1;
2200 		else
2201 			return 0;
2202 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2203 		if (dig->linkb)
2204 			return 3;
2205 		else
2206 			return 2;
2207 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2208 		if (dig->linkb)
2209 			return 5;
2210 		else
2211 			return 4;
2212 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2213 		return 6;
2214 	default:
2215 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2216 		return 0;
2217 	}
2218 }
2219 
2220 /**
2221  * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
2222  *
2223  * @crtc: drm crtc
2224  *
2225  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2226  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2227  * monitors a dedicated PPLL must be used.  If a particular board has
2228  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2229  * as there is no need to program the PLL itself.  If we are not able to
2230  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2231  * avoid messing up an existing monitor.
2232  *
2233  * Asic specific PLL information
2234  *
2235  * DCE 10.x
2236  * Tonga
2237  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2238  * CI
2239  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2240  *
2241  */
2242 static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
2243 {
2244 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2245 	struct drm_device *dev = crtc->dev;
2246 	struct amdgpu_device *adev = drm_to_adev(dev);
2247 	u32 pll_in_use;
2248 	int pll;
2249 
2250 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2251 		if (adev->clock.dp_extclk)
2252 			/* skip PPLL programming if using ext clock */
2253 			return ATOM_PPLL_INVALID;
2254 		else {
2255 			/* use the same PPLL for all DP monitors */
2256 			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2257 			if (pll != ATOM_PPLL_INVALID)
2258 				return pll;
2259 		}
2260 	} else {
2261 		/* use the same PPLL for all monitors with the same clock */
2262 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2263 		if (pll != ATOM_PPLL_INVALID)
2264 			return pll;
2265 	}
2266 
2267 	/* DCE10 has PPLL0, PPLL1, and PPLL2 */
2268 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2269 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2270 		return ATOM_PPLL2;
2271 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2272 		return ATOM_PPLL1;
2273 	if (!(pll_in_use & (1 << ATOM_PPLL0)))
2274 		return ATOM_PPLL0;
2275 	DRM_ERROR("unable to allocate a PPLL\n");
2276 	return ATOM_PPLL_INVALID;
2277 }
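/*
 * Example of the policy above: on a board driving two DP monitors and one
 * HDMI monitor, the two DP crtcs end up sharing a single PPLL via
 * amdgpu_pll_get_shared_dp_ppll(), while the HDMI crtc is handed one of the
 * remaining unused PPLLs (an illustrative scenario, not a tested setup).
 */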
2278 
2279 static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2280 {
2281 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2282 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2283 	uint32_t cur_lock;
2284 
2285 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2286 	if (lock)
2287 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2288 	else
2289 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2290 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2291 }
2292 
2293 static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
2294 {
2295 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2296 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2297 	u32 tmp;
2298 
2299 	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2300 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2301 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2302 }
2303 
2304 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
2305 {
2306 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2307 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2308 	u32 tmp;
2309 
2310 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2311 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2312 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2313 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2314 
2315 	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2316 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2317 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2318 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2319 }
2320 
2321 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2322 					int x, int y)
2323 {
2324 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2325 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2326 	int xorigin = 0, yorigin = 0;
2327 
2328 	amdgpu_crtc->cursor_x = x;
2329 	amdgpu_crtc->cursor_y = y;
2330 
2331 	/* avivo cursors are offset into the total surface */
2332 	x += crtc->x;
2333 	y += crtc->y;
2334 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2335 
2336 	if (x < 0) {
2337 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2338 		x = 0;
2339 	}
2340 	if (y < 0) {
2341 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2342 		y = 0;
2343 	}
2344 
2345 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2346 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2347 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2348 	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2349 
2350 	return 0;
2351 }
2352 
2353 static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2354 				      int x, int y)
2355 {
2356 	int ret;
2357 
2358 	dce_v10_0_lock_cursor(crtc, true);
2359 	ret = dce_v10_0_cursor_move_locked(crtc, x, y);
2360 	dce_v10_0_lock_cursor(crtc, false);
2361 
2362 	return ret;
2363 }
2364 
2365 static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2366 				      struct drm_file *file_priv,
2367 				      uint32_t handle,
2368 				      uint32_t width,
2369 				      uint32_t height,
2370 				      int32_t hot_x,
2371 				      int32_t hot_y)
2372 {
2373 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2374 	struct drm_gem_object *obj;
2375 	struct amdgpu_bo *aobj;
2376 	int ret;
2377 
2378 	if (!handle) {
2379 		/* turn off cursor */
2380 		dce_v10_0_hide_cursor(crtc);
2381 		obj = NULL;
2382 		goto unpin;
2383 	}
2384 
2385 	if ((width > amdgpu_crtc->max_cursor_width) ||
2386 	    (height > amdgpu_crtc->max_cursor_height)) {
2387 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2388 		return -EINVAL;
2389 	}
2390 
2391 	obj = drm_gem_object_lookup(file_priv, handle);
2392 	if (!obj) {
2393 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2394 		return -ENOENT;
2395 	}
2396 
2397 	aobj = gem_to_amdgpu_bo(obj);
2398 	ret = amdgpu_bo_reserve(aobj, false);
2399 	if (ret != 0) {
2400 		drm_gem_object_put(obj);
2401 		return ret;
2402 	}
2403 
2404 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2405 	amdgpu_bo_unreserve(aobj);
2406 	if (ret) {
2407 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2408 		drm_gem_object_put(obj);
2409 		return ret;
2410 	}
2411 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2412 
2413 	dce_v10_0_lock_cursor(crtc, true);
2414 
2415 	if (width != amdgpu_crtc->cursor_width ||
2416 	    height != amdgpu_crtc->cursor_height ||
2417 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2418 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2419 		int x, y;
2420 
2421 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2422 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2423 
2424 		dce_v10_0_cursor_move_locked(crtc, x, y);
2425 
2426 		amdgpu_crtc->cursor_width = width;
2427 		amdgpu_crtc->cursor_height = height;
2428 		amdgpu_crtc->cursor_hot_x = hot_x;
2429 		amdgpu_crtc->cursor_hot_y = hot_y;
2430 	}
2431 
2432 	dce_v10_0_show_cursor(crtc);
2433 	dce_v10_0_lock_cursor(crtc, false);
2434 
2435 unpin:
2436 	if (amdgpu_crtc->cursor_bo) {
2437 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2438 		ret = amdgpu_bo_reserve(aobj, true);
2439 		if (likely(ret == 0)) {
2440 			amdgpu_bo_unpin(aobj);
2441 			amdgpu_bo_unreserve(aobj);
2442 		}
2443 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2444 	}
2445 
2446 	amdgpu_crtc->cursor_bo = obj;
2447 	return 0;
2448 }
2449 
2450 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2451 {
2452 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2453 
2454 	if (amdgpu_crtc->cursor_bo) {
2455 		dce_v10_0_lock_cursor(crtc, true);
2456 
2457 		dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2458 					     amdgpu_crtc->cursor_y);
2459 
2460 		dce_v10_0_show_cursor(crtc);
2461 
2462 		dce_v10_0_lock_cursor(crtc, false);
2463 	}
2464 }
2465 
2466 static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2467 				    u16 *blue, uint32_t size,
2468 				    struct drm_modeset_acquire_ctx *ctx)
2469 {
2470 	dce_v10_0_crtc_load_lut(crtc);
2471 
2472 	return 0;
2473 }
2474 
2475 static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
2476 {
2477 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2478 
2479 	drm_crtc_cleanup(crtc);
2480 	kfree(amdgpu_crtc);
2481 }
2482 
2483 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
2484 	.cursor_set2 = dce_v10_0_crtc_cursor_set2,
2485 	.cursor_move = dce_v10_0_crtc_cursor_move,
2486 	.gamma_set = dce_v10_0_crtc_gamma_set,
2487 	.set_config = amdgpu_display_crtc_set_config,
2488 	.destroy = dce_v10_0_crtc_destroy,
2489 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2490 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2491 	.enable_vblank = amdgpu_enable_vblank_kms,
2492 	.disable_vblank = amdgpu_disable_vblank_kms,
2493 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2494 };
2495 
2496 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2497 {
2498 	struct drm_device *dev = crtc->dev;
2499 	struct amdgpu_device *adev = drm_to_adev(dev);
2500 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2501 	unsigned type;
2502 
2503 	switch (mode) {
2504 	case DRM_MODE_DPMS_ON:
2505 		amdgpu_crtc->enabled = true;
2506 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2507 		dce_v10_0_vga_enable(crtc, true);
2508 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2509 		dce_v10_0_vga_enable(crtc, false);
2510 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2511 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2512 						amdgpu_crtc->crtc_id);
2513 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2514 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2515 		drm_crtc_vblank_on(crtc);
2516 		dce_v10_0_crtc_load_lut(crtc);
2517 		break;
2518 	case DRM_MODE_DPMS_STANDBY:
2519 	case DRM_MODE_DPMS_SUSPEND:
2520 	case DRM_MODE_DPMS_OFF:
2521 		drm_crtc_vblank_off(crtc);
2522 		if (amdgpu_crtc->enabled) {
2523 			dce_v10_0_vga_enable(crtc, true);
2524 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2525 			dce_v10_0_vga_enable(crtc, false);
2526 		}
2527 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2528 		amdgpu_crtc->enabled = false;
2529 		break;
2530 	}
2531 	/* adjust pm to dpms */
2532 	amdgpu_dpm_compute_clocks(adev);
2533 }
2534 
2535 static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
2536 {
2537 	/* disable crtc pair power gating before programming */
2538 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2539 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2540 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2541 }
2542 
2543 static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
2544 {
2545 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2546 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2547 }
2548 
2549 static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
2550 {
2551 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2552 	struct drm_device *dev = crtc->dev;
2553 	struct amdgpu_device *adev = drm_to_adev(dev);
2554 	struct amdgpu_atom_ss ss;
2555 	int i;
2556 
2557 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2558 	if (crtc->primary->fb) {
2559 		int r;
2560 		struct amdgpu_bo *abo;
2561 
2562 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2563 		r = amdgpu_bo_reserve(abo, true);
2564 		if (unlikely(r))
2565 			DRM_ERROR("failed to reserve abo before unpin\n");
2566 		else {
2567 			amdgpu_bo_unpin(abo);
2568 			amdgpu_bo_unreserve(abo);
2569 		}
2570 	}
2571 	/* disable the GRPH */
2572 	dce_v10_0_grph_enable(crtc, false);
2573 
2574 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2575 
2576 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2577 		if (adev->mode_info.crtcs[i] &&
2578 		    adev->mode_info.crtcs[i]->enabled &&
2579 		    i != amdgpu_crtc->crtc_id &&
2580 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2581 			/* one other crtc is using this pll, don't turn
2582 			 * off the pll
2583 			 */
2584 			goto done;
2585 		}
2586 	}
2587 
2588 	switch (amdgpu_crtc->pll_id) {
2589 	case ATOM_PPLL0:
2590 	case ATOM_PPLL1:
2591 	case ATOM_PPLL2:
2592 		/* disable the ppll */
2593 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2594 					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2595 		break;
2596 	default:
2597 		break;
2598 	}
2599 done:
2600 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2601 	amdgpu_crtc->adjusted_clock = 0;
2602 	amdgpu_crtc->encoder = NULL;
2603 	amdgpu_crtc->connector = NULL;
2604 }
2605 
2606 static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
2607 				  struct drm_display_mode *mode,
2608 				  struct drm_display_mode *adjusted_mode,
2609 				  int x, int y, struct drm_framebuffer *old_fb)
2610 {
2611 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2612 
2613 	if (!amdgpu_crtc->adjusted_clock)
2614 		return -EINVAL;
2615 
2616 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2617 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2618 	dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2619 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2620 	amdgpu_atombios_crtc_scaler_setup(crtc);
2621 	dce_v10_0_cursor_reset(crtc);
2622 	/* update the hw mode for dpm */
2623 	amdgpu_crtc->hw_mode = *adjusted_mode;
2624 
2625 	return 0;
2626 }
2627 
2628 static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
2629 				     const struct drm_display_mode *mode,
2630 				     struct drm_display_mode *adjusted_mode)
2631 {
2632 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2633 	struct drm_device *dev = crtc->dev;
2634 	struct drm_encoder *encoder;
2635 
2636 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2637 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2638 		if (encoder->crtc == crtc) {
2639 			amdgpu_crtc->encoder = encoder;
2640 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2641 			break;
2642 		}
2643 	}
2644 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2645 		amdgpu_crtc->encoder = NULL;
2646 		amdgpu_crtc->connector = NULL;
2647 		return false;
2648 	}
2649 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2650 		return false;
2651 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2652 		return false;
2653 	/* pick pll */
2654 	amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
2655 	/* if we can't get a PPLL for a non-DP encoder, fail */
2656 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2657 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2658 		return false;
2659 
2660 	return true;
2661 }
2662 
2663 static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2664 				  struct drm_framebuffer *old_fb)
2665 {
2666 	return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2667 }
2668 
2669 static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2670 					 struct drm_framebuffer *fb,
2671 					 int x, int y, enum mode_set_atomic state)
2672 {
2673 	return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
2674 }
2675 
2676 static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
2677 	.dpms = dce_v10_0_crtc_dpms,
2678 	.mode_fixup = dce_v10_0_crtc_mode_fixup,
2679 	.mode_set = dce_v10_0_crtc_mode_set,
2680 	.mode_set_base = dce_v10_0_crtc_set_base,
2681 	.mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
2682 	.prepare = dce_v10_0_crtc_prepare,
2683 	.commit = dce_v10_0_crtc_commit,
2684 	.disable = dce_v10_0_crtc_disable,
2685 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2686 };
2687 
2688 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
2689 {
2690 	struct amdgpu_crtc *amdgpu_crtc;
2691 
2692 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2693 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2694 	if (amdgpu_crtc == NULL)
2695 		return -ENOMEM;
2696 
2697 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
2698 
2699 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2700 	amdgpu_crtc->crtc_id = index;
2701 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2702 
2703 	amdgpu_crtc->max_cursor_width = 128;
2704 	amdgpu_crtc->max_cursor_height = 128;
2705 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2706 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2707 
2708 	switch (amdgpu_crtc->crtc_id) {
2709 	case 0:
2710 	default:
2711 		amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2712 		break;
2713 	case 1:
2714 		amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2715 		break;
2716 	case 2:
2717 		amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2718 		break;
2719 	case 3:
2720 		amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2721 		break;
2722 	case 4:
2723 		amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2724 		break;
2725 	case 5:
2726 		amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2727 		break;
2728 	}
2729 
2730 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2731 	amdgpu_crtc->adjusted_clock = 0;
2732 	amdgpu_crtc->encoder = NULL;
2733 	amdgpu_crtc->connector = NULL;
2734 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
2735 
2736 	return 0;
2737 }
2738 
2739 static int dce_v10_0_early_init(void *handle)
2740 {
2741 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2742 
2743 	adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
2744 	adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
2745 
2746 	dce_v10_0_set_display_funcs(adev);
2747 
2748 	adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
2749 
2750 	switch (adev->asic_type) {
2751 	case CHIP_FIJI:
2752 	case CHIP_TONGA:
2753 		adev->mode_info.num_hpd = 6;
2754 		adev->mode_info.num_dig = 7;
2755 		break;
2756 	default:
2757 		/* FIXME: not supported yet */
2758 		return -EINVAL;
2759 	}
2760 
2761 	dce_v10_0_set_irq_funcs(adev);
2762 
2763 	return 0;
2764 }
2765 
2766 static int dce_v10_0_sw_init(void *handle)
2767 {
2768 	int r, i;
2769 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2770 
2771 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2772 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2773 		if (r)
2774 			return r;
2775 	}
2776 
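	/* pageflip sources for D1..D6 sit at every other src id starting at
	 * VISLANDS30_IV_SRCID_D1_GRPH_PFLIP (8, 10, ..., 18), hence the
	 * stride of 2 and the upper bound of 20
	 */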
2777 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2778 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2779 		if (r)
2780 			return r;
2781 	}
2782 
2783 	/* HPD hotplug */
2784 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2785 	if (r)
2786 		return r;
2787 
2788 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2789 
2790 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2791 
2792 	adev_to_drm(adev)->mode_config.max_width = 16384;
2793 	adev_to_drm(adev)->mode_config.max_height = 16384;
2794 
2795 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2796 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2797 
2798 	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2799 
2800 	r = amdgpu_display_modeset_create_props(adev);
2801 	if (r)
2802 		return r;
2803 
2804 	adev_to_drm(adev)->mode_config.max_width = 16384;
2805 	adev_to_drm(adev)->mode_config.max_height = 16384;
2806 
2807 	/* allocate crtcs */
2808 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2809 		r = dce_v10_0_crtc_init(adev, i);
2810 		if (r)
2811 			return r;
2812 	}
2813 
2814 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2815 		amdgpu_display_print_display_setup(adev_to_drm(adev));
2816 	else
2817 		return -EINVAL;
2818 
2819 	/* setup afmt */
2820 	r = dce_v10_0_afmt_init(adev);
2821 	if (r)
2822 		return r;
2823 
2824 	r = dce_v10_0_audio_init(adev);
2825 	if (r)
2826 		return r;
2827 
2828 	/* Disable vblank IRQs aggressively for power-saving */
2829 	/* XXX: can this be enabled for DC? */
2830 	adev_to_drm(adev)->vblank_disable_immediate = true;
2831 
2832 	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2833 	if (r)
2834 		return r;
2835 
2836 	INIT_DELAYED_WORK(&adev->hotplug_work,
2837 		  amdgpu_display_hotplug_work_func);
2838 
2839 	drm_kms_helper_poll_init(adev_to_drm(adev));
2840 
2841 	adev->mode_info.mode_config_initialized = true;
2842 	return 0;
2843 }
2844 
2845 static int dce_v10_0_sw_fini(void *handle)
2846 {
2847 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2848 
2849 	drm_edid_free(adev->mode_info.bios_hardcoded_edid);
2850 
2851 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2852 
2853 	dce_v10_0_audio_fini(adev);
2854 
2855 	dce_v10_0_afmt_fini(adev);
2856 
2857 	drm_mode_config_cleanup(adev_to_drm(adev));
2858 	adev->mode_info.mode_config_initialized = false;
2859 
2860 	return 0;
2861 }
2862 
2863 static int dce_v10_0_hw_init(void *handle)
2864 {
2865 	int i;
2866 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2867 
2868 	dce_v10_0_init_golden_registers(adev);
2869 
2870 	/* disable vga render */
2871 	dce_v10_0_set_vga_render_state(adev, false);
2872 	/* init dig PHYs, disp eng pll */
2873 	amdgpu_atombios_encoder_init_dig(adev);
2874 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2875 
2876 	/* initialize hpd */
2877 	dce_v10_0_hpd_init(adev);
2878 
2879 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2880 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2881 	}
2882 
2883 	dce_v10_0_pageflip_interrupt_init(adev);
2884 
2885 	return 0;
2886 }
2887 
2888 static int dce_v10_0_hw_fini(void *handle)
2889 {
2890 	int i;
2891 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2892 
2893 	dce_v10_0_hpd_fini(adev);
2894 
2895 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2896 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2897 	}
2898 
2899 	dce_v10_0_pageflip_interrupt_fini(adev);
2900 
2901 	flush_delayed_work(&adev->hotplug_work);
2902 
2903 	return 0;
2904 }
2905 
2906 static int dce_v10_0_suspend(void *handle)
2907 {
2908 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2909 	int r;
2910 
2911 	r = amdgpu_display_suspend_helper(adev);
2912 	if (r)
2913 		return r;
2914 
2915 	adev->mode_info.bl_level =
2916 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2917 
2918 	return dce_v10_0_hw_fini(handle);
2919 }
2920 
2921 static int dce_v10_0_resume(void *handle)
2922 {
2923 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2924 	int ret;
2925 
2926 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2927 							   adev->mode_info.bl_level);
2928 
2929 	ret = dce_v10_0_hw_init(handle);
2930 
2931 	/* turn on the BL */
2932 	if (adev->mode_info.bl_encoder) {
2933 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2934 								  adev->mode_info.bl_encoder);
2935 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2936 						    bl_level);
2937 	}
2938 	if (ret)
2939 		return ret;
2940 
2941 	return amdgpu_display_resume_helper(adev);
2942 }
2943 
2944 static bool dce_v10_0_is_idle(void *handle)
2945 {
2946 	return true;
2947 }
2948 
2949 static int dce_v10_0_wait_for_idle(void *handle)
2950 {
2951 	return 0;
2952 }
2953 
2954 static bool dce_v10_0_check_soft_reset(void *handle)
2955 {
2956 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2957 
2958 	return dce_v10_0_is_display_hung(adev);
2959 }
2960 
2961 static int dce_v10_0_soft_reset(void *handle)
2962 {
2963 	u32 srbm_soft_reset = 0, tmp;
2964 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2965 
2966 	if (dce_v10_0_is_display_hung(adev))
2967 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2968 
2969 	if (srbm_soft_reset) {
2970 		tmp = RREG32(mmSRBM_SOFT_RESET);
2971 		tmp |= srbm_soft_reset;
2972 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2973 		WREG32(mmSRBM_SOFT_RESET, tmp);
2974 		tmp = RREG32(mmSRBM_SOFT_RESET);
2975 
2976 		udelay(50);
2977 
2978 		tmp &= ~srbm_soft_reset;
2979 		WREG32(mmSRBM_SOFT_RESET, tmp);
2980 		tmp = RREG32(mmSRBM_SOFT_RESET);
2981 
2982 		/* Wait a little for things to settle down */
2983 		udelay(50);
2984 	}
2985 	return 0;
2986 }
2987 
2988 static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2989 						     int crtc,
2990 						     enum amdgpu_interrupt_state state)
2991 {
2992 	u32 lb_interrupt_mask;
2993 
2994 	if (crtc >= adev->mode_info.num_crtc) {
2995 		DRM_DEBUG("invalid crtc %d\n", crtc);
2996 		return;
2997 	}
2998 
2999 	switch (state) {
3000 	case AMDGPU_IRQ_STATE_DISABLE:
3001 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3002 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3003 						  VBLANK_INTERRUPT_MASK, 0);
3004 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3005 		break;
3006 	case AMDGPU_IRQ_STATE_ENABLE:
3007 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3008 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3009 						  VBLANK_INTERRUPT_MASK, 1);
3010 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3011 		break;
3012 	default:
3013 		break;
3014 	}
3015 }
3016 
3017 static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3018 						    int crtc,
3019 						    enum amdgpu_interrupt_state state)
3020 {
3021 	u32 lb_interrupt_mask;
3022 
3023 	if (crtc >= adev->mode_info.num_crtc) {
3024 		DRM_DEBUG("invalid crtc %d\n", crtc);
3025 		return;
3026 	}
3027 
3028 	switch (state) {
3029 	case AMDGPU_IRQ_STATE_DISABLE:
3030 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3031 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3032 						  VLINE_INTERRUPT_MASK, 0);
3033 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3034 		break;
3035 	case AMDGPU_IRQ_STATE_ENABLE:
3036 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3037 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3038 						  VLINE_INTERRUPT_MASK, 1);
3039 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3040 		break;
3041 	default:
3042 		break;
3043 	}
3044 }
3045 
3046 static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
3047 				       struct amdgpu_irq_src *source,
3048 				       unsigned hpd,
3049 				       enum amdgpu_interrupt_state state)
3050 {
3051 	u32 tmp;
3052 
3053 	if (hpd >= adev->mode_info.num_hpd) {
3054 		DRM_DEBUG("invalid hpd %d\n", hpd);
3055 		return 0;
3056 	}
3057 
3058 	switch (state) {
3059 	case AMDGPU_IRQ_STATE_DISABLE:
3060 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3061 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3062 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3063 		break;
3064 	case AMDGPU_IRQ_STATE_ENABLE:
3065 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3066 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3067 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3068 		break;
3069 	default:
3070 		break;
3071 	}
3072 
3073 	return 0;
3074 }
3075 
3076 static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
3077 					struct amdgpu_irq_src *source,
3078 					unsigned type,
3079 					enum amdgpu_interrupt_state state)
3080 {
3081 	switch (type) {
3082 	case AMDGPU_CRTC_IRQ_VBLANK1:
3083 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3084 		break;
3085 	case AMDGPU_CRTC_IRQ_VBLANK2:
3086 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3087 		break;
3088 	case AMDGPU_CRTC_IRQ_VBLANK3:
3089 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3090 		break;
3091 	case AMDGPU_CRTC_IRQ_VBLANK4:
3092 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3093 		break;
3094 	case AMDGPU_CRTC_IRQ_VBLANK5:
3095 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3096 		break;
3097 	case AMDGPU_CRTC_IRQ_VBLANK6:
3098 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3099 		break;
3100 	case AMDGPU_CRTC_IRQ_VLINE1:
3101 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
3102 		break;
3103 	case AMDGPU_CRTC_IRQ_VLINE2:
3104 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
3105 		break;
3106 	case AMDGPU_CRTC_IRQ_VLINE3:
3107 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
3108 		break;
3109 	case AMDGPU_CRTC_IRQ_VLINE4:
3110 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
3111 		break;
3112 	case AMDGPU_CRTC_IRQ_VLINE5:
3113 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
3114 		break;
3115 	case AMDGPU_CRTC_IRQ_VLINE6:
3116 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
3117 		break;
3118 	default:
3119 		break;
3120 	}
3121 	return 0;
3122 }
3123 
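/**
 * dce_v10_0_set_pageflip_irq_state - enable/disable pageflip interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: irq source
 * @type: crtc the interrupt applies to
 * @state: requested interrupt state
 *
 * Masks or unmasks GRPH_PFLIP_INT in GRPH_INTERRUPT_CONTROL for the
 * requested crtc (DCE10).  Returns 0 on success, -EINVAL for an
 * invalid crtc.
 */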
3124 static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3125 					    struct amdgpu_irq_src *src,
3126 					    unsigned type,
3127 					    enum amdgpu_interrupt_state state)
3128 {
3129 	u32 reg;
3130 
3131 	if (type >= adev->mode_info.num_crtc) {
3132 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3133 		return -EINVAL;
3134 	}
3135 
3136 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3137 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3138 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3139 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3140 	else
3141 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3142 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3143 
3144 	return 0;
3145 }
3146 
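/**
 * dce_v10_0_pageflip_irq - pageflip interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acks the pageflip interrupt for the crtc encoded in the IV entry,
 * completes the pending flip by sending the vblank event to userspace,
 * drops the vblank reference and schedules the unpin work (DCE10).
 */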
3147 static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3148 				  struct amdgpu_irq_src *source,
3149 				  struct amdgpu_iv_entry *entry)
3150 {
3151 	unsigned long flags;
3152 	unsigned crtc_id;
3153 	struct amdgpu_crtc *amdgpu_crtc;
3154 	struct amdgpu_flip_work *works;
3155 
3156 	crtc_id = (entry->src_id - 8) >> 1;
3157 
3158 	if (crtc_id >= adev->mode_info.num_crtc) {
3159 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3160 		return -EINVAL;
3161 	}
3162 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3163 
3164 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3165 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3166 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3167 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3168 
3169 	/* the IRQ can fire before the crtc has been fully initialized */
3170 	if (!amdgpu_crtc)
3171 		return 0;
3172 
3173 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3174 	works = amdgpu_crtc->pflip_works;
3175 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3176 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d)\n",
3177 				 amdgpu_crtc->pflip_status,
3178 				 AMDGPU_FLIP_SUBMITTED);
3180 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3181 		return 0;
3182 	}
3183 
3184 	/* page flip completed. clean up */
3185 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3186 	amdgpu_crtc->pflip_works = NULL;
3187 
3188 	/* wake up userspace */
3189 	if (works->event)
3190 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3191 
3192 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3193 
3194 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3195 	schedule_work(&works->unpin_work);
3196 
3197 	return 0;
3198 }
3199 
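/**
 * dce_v10_0_hpd_int_ack - ack a hpd interrupt
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd pin to ack
 *
 * Writes DC_HPD_INT_ACK to clear the interrupt status for the
 * requested hpd pin (DCE10).
 */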
3200 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
3201 				  int hpd)
3202 {
3203 	u32 tmp;
3204 
3205 	if (hpd >= adev->mode_info.num_hpd) {
3206 		DRM_DEBUG("invalid hpd %d\n", hpd);
3207 		return;
3208 	}
3209 
3210 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3211 	tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3212 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3213 }
3214 
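/**
 * dce_v10_0_crtc_vblank_int_ack - ack a crtc vblank interrupt
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to ack
 *
 * Sets VBLANK_ACK in LB_VBLANK_STATUS to clear the pending vblank
 * interrupt for the requested crtc (DCE10).
 */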
3215 static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3216 					  int crtc)
3217 {
3218 	u32 tmp;
3219 
3220 	if (crtc >= adev->mode_info.num_crtc) {
3221 		DRM_DEBUG("invalid crtc %d\n", crtc);
3222 		return;
3223 	}
3224 
3225 	tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3226 	tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3227 	WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3228 }
3229 
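/**
 * dce_v10_0_crtc_vline_int_ack - ack a crtc vline interrupt
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to ack
 *
 * Sets VLINE_ACK in LB_VLINE_STATUS to clear the pending vline
 * interrupt for the requested crtc (DCE10).
 */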
3230 static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3231 					 int crtc)
3232 {
3233 	u32 tmp;
3234 
3235 	if (crtc >= adev->mode_info.num_crtc) {
3236 		DRM_DEBUG("invalid crtc %d\n", crtc);
3237 		return;
3238 	}
3239 
3240 	tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3241 	tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3242 	WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3243 }
3244 
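/**
 * dce_v10_0_crtc_irq - crtc vblank/vline interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acks crtc vblank and vline interrupts and, for vblank, forwards the
 * event to the DRM core if the interrupt source is enabled (DCE10).
 */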
3245 static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3246 			      struct amdgpu_irq_src *source,
3247 			      struct amdgpu_iv_entry *entry)
3248 {
3249 	unsigned crtc = entry->src_id - 1;
3250 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3251 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
3252 
3253 	switch (entry->src_data[0]) {
3254 	case 0: /* vblank */
3255 		if (disp_int & interrupt_status_offsets[crtc].vblank)
3256 			dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3257 		else
3258 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3259 
3260 		if (amdgpu_irq_enabled(adev, source, irq_type))
3261 			drm_handle_vblank(adev_to_drm(adev), crtc);
3263 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3264 
3265 		break;
3266 	case 1: /* vline */
3267 		if (disp_int & interrupt_status_offsets[crtc].vline)
3268 			dce_v10_0_crtc_vline_int_ack(adev, crtc);
3269 		else
3270 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3271 
3272 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3273 
3274 		break;
3275 	default:
3276 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3277 		break;
3278 	}
3279 
3280 	return 0;
3281 }
3282 
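/**
 * dce_v10_0_hpd_irq - hpd interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acks the hpd interrupt and schedules the hotplug work to process the
 * connector change (DCE10).
 */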
3283 static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
3284 			     struct amdgpu_irq_src *source,
3285 			     struct amdgpu_iv_entry *entry)
3286 {
3287 	uint32_t disp_int, mask;
3288 	unsigned hpd;
3289 
3290 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3291 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3292 		return 0;
3293 	}
3294 
3295 	hpd = entry->src_data[0];
3296 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3297 	mask = interrupt_status_offsets[hpd].hpd;
3298 
3299 	if (disp_int & mask) {
3300 		dce_v10_0_hpd_int_ack(adev, hpd);
3301 		schedule_delayed_work(&adev->hotplug_work, 0);
3302 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3303 	}
3304 
3305 	return 0;
3306 }
3307 
3308 static int dce_v10_0_set_clockgating_state(void *handle,
3309 					  enum amd_clockgating_state state)
3310 {
3311 	return 0;
3312 }
3313 
3314 static int dce_v10_0_set_powergating_state(void *handle,
3315 					  enum amd_powergating_state state)
3316 {
3317 	return 0;
3318 }
3319 
3320 static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
3321 	.name = "dce_v10_0",
3322 	.early_init = dce_v10_0_early_init,
3323 	.late_init = NULL,
3324 	.sw_init = dce_v10_0_sw_init,
3325 	.sw_fini = dce_v10_0_sw_fini,
3326 	.hw_init = dce_v10_0_hw_init,
3327 	.hw_fini = dce_v10_0_hw_fini,
3328 	.suspend = dce_v10_0_suspend,
3329 	.resume = dce_v10_0_resume,
3330 	.is_idle = dce_v10_0_is_idle,
3331 	.wait_for_idle = dce_v10_0_wait_for_idle,
3332 	.check_soft_reset = dce_v10_0_check_soft_reset,
3333 	.soft_reset = dce_v10_0_soft_reset,
3334 	.set_clockgating_state = dce_v10_0_set_clockgating_state,
3335 	.set_powergating_state = dce_v10_0_set_powergating_state,
3336 	.dump_ip_state = NULL,
3337 	.print_ip_state = NULL,
3338 };
3339 
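/**
 * dce_v10_0_encoder_mode_set - encoder mode set callback
 *
 * @encoder: drm encoder
 * @mode: requested mode
 * @adjusted_mode: adjusted mode after fixup
 *
 * Caches the pixel clock, turns the encoder off for programming,
 * restores the interleave setting and, for HDMI, enables and programs
 * the AFMT block (DCE10).
 */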
3340 static void
3341 dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
3342 			  struct drm_display_mode *mode,
3343 			  struct drm_display_mode *adjusted_mode)
3344 {
3345 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3346 
3347 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3348 
3349 	/* need to call this here rather than in prepare() since we need some crtc info */
3350 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3351 
3352 	/* set scaler clears this on some chips */
3353 	dce_v10_0_set_interleave(encoder->crtc, mode);
3354 
3355 	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3356 		dce_v10_0_afmt_enable(encoder, true);
3357 		dce_v10_0_afmt_setmode(encoder, adjusted_mode);
3358 	}
3359 }
3360 
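/**
 * dce_v10_0_encoder_prepare - encoder prepare callback
 *
 * @encoder: drm encoder
 *
 * Picks a DIG encoder and AFMT block for digital outputs, locks the
 * BIOS scratch registers, selects the I2C router port and powers up
 * eDP panels where needed, then sets the crtc source and programs the
 * FMT block (DCE10).
 */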
3361 static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
3362 {
3363 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3364 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3365 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3366 
3367 	if ((amdgpu_encoder->active_device &
3368 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3369 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3370 	     ENCODER_OBJECT_ID_NONE)) {
3371 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3372 		if (dig) {
3373 			dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
3374 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3375 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3376 		}
3377 	}
3378 
3379 	amdgpu_atombios_scratch_regs_lock(adev, true);
3380 
3381 	if (connector) {
3382 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3383 
3384 		/* select the clock/data port if it uses a router */
3385 		if (amdgpu_connector->router.cd_valid)
3386 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3387 
3388 		/* turn eDP panel on for mode set */
3389 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3390 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3391 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3392 	}
3393 
3394 	/* this is needed for the pll/ss setup to work correctly in some cases */
3395 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3396 	/* set up the FMT blocks */
3397 	dce_v10_0_program_fmt(encoder);
3398 }
3399 
3400 static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
3401 {
3402 	struct drm_device *dev = encoder->dev;
3403 	struct amdgpu_device *adev = drm_to_adev(dev);
3404 
3405 	/* need to call this here as we need the crtc set up */
3406 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3407 	amdgpu_atombios_scratch_regs_lock(adev, false);
3408 }
3409 
3410 static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
3411 {
3412 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3413 	struct amdgpu_encoder_atom_dig *dig;
3414 
3415 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3416 
3417 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3418 		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3419 			dce_v10_0_afmt_enable(encoder, false);
3420 		dig = amdgpu_encoder->enc_priv;
3421 		dig->dig_encoder = -1;
3422 	}
3423 	amdgpu_encoder->active_device = 0;
3424 }
3425 
3426 /* these are handled by the primary encoders */
3427 static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
3428 {
3429 
3430 }
3431 
3432 static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
3433 {
3434 
3435 }
3436 
3437 static void
3438 dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
3439 		      struct drm_display_mode *mode,
3440 		      struct drm_display_mode *adjusted_mode)
3441 {
3442 
3443 }
3444 
3445 static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
3446 {
3447 
3448 }
3449 
3450 static void
3451 dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
3452 {
3453 
3454 }
3455 
3456 static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
3457 	.dpms = dce_v10_0_ext_dpms,
3458 	.prepare = dce_v10_0_ext_prepare,
3459 	.mode_set = dce_v10_0_ext_mode_set,
3460 	.commit = dce_v10_0_ext_commit,
3461 	.disable = dce_v10_0_ext_disable,
3462 	/* no detect for TMDS/LVDS yet */
3463 };
3464 
3465 static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
3466 	.dpms = amdgpu_atombios_encoder_dpms,
3467 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3468 	.prepare = dce_v10_0_encoder_prepare,
3469 	.mode_set = dce_v10_0_encoder_mode_set,
3470 	.commit = dce_v10_0_encoder_commit,
3471 	.disable = dce_v10_0_encoder_disable,
3472 	.detect = amdgpu_atombios_encoder_dig_detect,
3473 };
3474 
3475 static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
3476 	.dpms = amdgpu_atombios_encoder_dpms,
3477 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3478 	.prepare = dce_v10_0_encoder_prepare,
3479 	.mode_set = dce_v10_0_encoder_mode_set,
3480 	.commit = dce_v10_0_encoder_commit,
3481 	.detect = amdgpu_atombios_encoder_dac_detect,
3482 };
3483 
3484 static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
3485 {
3486 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3487 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3488 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3489 	kfree(amdgpu_encoder->enc_priv);
3490 	drm_encoder_cleanup(encoder);
3491 	kfree(amdgpu_encoder);
3492 }
3493 
3494 static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
3495 	.destroy = dce_v10_0_encoder_destroy,
3496 };
3497 
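/**
 * dce_v10_0_encoder_add - add an encoder for a bios encoder object
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the bios object table
 * @supported_device: bitmask of devices this encoder can drive
 * @caps: encoder capabilities
 *
 * Registers a drm encoder for the given bios encoder object, reusing
 * an existing one if it was already added, and hooks up the proper
 * funcs and helper funcs based on the encoder type (DCE10).
 */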
3498 static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3499 				 uint32_t encoder_enum,
3500 				 uint32_t supported_device,
3501 				 u16 caps)
3502 {
3503 	struct drm_device *dev = adev_to_drm(adev);
3504 	struct drm_encoder *encoder;
3505 	struct amdgpu_encoder *amdgpu_encoder;
3506 
3507 	/* see if we already added it */
3508 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3509 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3510 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3511 			amdgpu_encoder->devices |= supported_device;
3512 			return;
3513 		}
3515 	}
3516 
3517 	/* add a new one */
3518 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3519 	if (!amdgpu_encoder)
3520 		return;
3521 
3522 	encoder = &amdgpu_encoder->base;
3523 	switch (adev->mode_info.num_crtc) {
3524 	case 1:
3525 		encoder->possible_crtcs = 0x1;
3526 		break;
3527 	case 2:
3528 	default:
3529 		encoder->possible_crtcs = 0x3;
3530 		break;
3531 	case 4:
3532 		encoder->possible_crtcs = 0xf;
3533 		break;
3534 	case 6:
3535 		encoder->possible_crtcs = 0x3f;
3536 		break;
3537 	}
3538 
3539 	amdgpu_encoder->enc_priv = NULL;
3540 
3541 	amdgpu_encoder->encoder_enum = encoder_enum;
3542 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3543 	amdgpu_encoder->devices = supported_device;
3544 	amdgpu_encoder->rmx_type = RMX_OFF;
3545 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3546 	amdgpu_encoder->is_ext_encoder = false;
3547 	amdgpu_encoder->caps = caps;
3548 
3549 	switch (amdgpu_encoder->encoder_id) {
3550 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3551 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3552 		drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3553 				 DRM_MODE_ENCODER_DAC, NULL);
3554 		drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
3555 		break;
3556 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3557 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3558 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3559 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3560 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3561 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3562 			amdgpu_encoder->rmx_type = RMX_FULL;
3563 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3564 					 DRM_MODE_ENCODER_LVDS, NULL);
3565 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3566 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3567 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3568 					 DRM_MODE_ENCODER_DAC, NULL);
3569 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3570 		} else {
3571 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3572 					 DRM_MODE_ENCODER_TMDS, NULL);
3573 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3574 		}
3575 		drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
3576 		break;
3577 	case ENCODER_OBJECT_ID_SI170B:
3578 	case ENCODER_OBJECT_ID_CH7303:
3579 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3580 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3581 	case ENCODER_OBJECT_ID_TITFP513:
3582 	case ENCODER_OBJECT_ID_VT1623:
3583 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3584 	case ENCODER_OBJECT_ID_TRAVIS:
3585 	case ENCODER_OBJECT_ID_NUTMEG:
3586 		/* these are handled by the primary encoders */
3587 		amdgpu_encoder->is_ext_encoder = true;
3588 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3589 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3590 					 DRM_MODE_ENCODER_LVDS, NULL);
3591 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3592 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3593 					 DRM_MODE_ENCODER_DAC, NULL);
3594 		else
3595 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3596 					 DRM_MODE_ENCODER_TMDS, NULL);
3597 		drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
3598 		break;
3599 	}
3600 }
3601 
3602 static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
3603 	.bandwidth_update = &dce_v10_0_bandwidth_update,
3604 	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
3605 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3606 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3607 	.hpd_sense = &dce_v10_0_hpd_sense,
3608 	.hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
3609 	.hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
3610 	.page_flip = &dce_v10_0_page_flip,
3611 	.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
3612 	.add_encoder = &dce_v10_0_encoder_add,
3613 	.add_connector = &amdgpu_connector_add,
3614 };
3615 
3616 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
3617 {
3618 	adev->mode_info.funcs = &dce_v10_0_display_funcs;
3619 }
3620 
3621 static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
3622 	.set = dce_v10_0_set_crtc_irq_state,
3623 	.process = dce_v10_0_crtc_irq,
3624 };
3625 
3626 static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
3627 	.set = dce_v10_0_set_pageflip_irq_state,
3628 	.process = dce_v10_0_pageflip_irq,
3629 };
3630 
3631 static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
3632 	.set = dce_v10_0_set_hpd_irq_state,
3633 	.process = dce_v10_0_hpd_irq,
3634 };
3635 
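/**
 * dce_v10_0_set_irq_funcs - set up the interrupt source callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Sets the number of interrupt types and the set/process callbacks for
 * the crtc, pageflip and hpd interrupt sources (DCE10).
 */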
3636 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
3637 {
3638 	if (adev->mode_info.num_crtc > 0)
3639 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3640 	else
3641 		adev->crtc_irq.num_types = 0;
3642 	adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
3643 
3644 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3645 	adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
3646 
3647 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3648 	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
3649 }
3650 
3651 const struct amdgpu_ip_block_version dce_v10_0_ip_block = {
3652 	.type = AMD_IP_BLOCK_TYPE_DCE,
3653 	.major = 10,
3654 	.minor = 0,
3655 	.rev = 0,
3656 	.funcs = &dce_v10_0_ip_funcs,
3657 };
3658 
3659 const struct amdgpu_ip_block_version dce_v10_1_ip_block = {
3660 	.type = AMD_IP_BLOCK_TYPE_DCE,
3661 	.major = 10,
3662 	.minor = 1,
3663 	.rev = 0,
3664 	.funcs = &dce_v10_0_ip_funcs,
3665 };
3666