xref: /linux/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c (revision e0a37f85fc95e3f2550446316bc4a27d00d75993)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "drmP.h"
24 #include "amdgpu.h"
25 #include "amdgpu_pm.h"
26 #include "amdgpu_i2c.h"
27 #include "cikd.h"
28 #include "atom.h"
29 #include "amdgpu_atombios.h"
30 #include "atombios_crtc.h"
31 #include "atombios_encoders.h"
32 #include "amdgpu_pll.h"
33 #include "amdgpu_connectors.h"
34 
35 #include "dce/dce_8_0_d.h"
36 #include "dce/dce_8_0_sh_mask.h"
37 
38 #include "gca/gfx_7_2_enum.h"
39 
40 #include "gmc/gmc_7_1_d.h"
41 #include "gmc/gmc_7_1_sh_mask.h"
42 
43 #include "oss/oss_2_0_d.h"
44 #include "oss/oss_2_0_sh_mask.h"
45 
46 static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
47 static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
48 
49 static const u32 crtc_offsets[6] =
50 {
51 	CRTC0_REGISTER_OFFSET,
52 	CRTC1_REGISTER_OFFSET,
53 	CRTC2_REGISTER_OFFSET,
54 	CRTC3_REGISTER_OFFSET,
55 	CRTC4_REGISTER_OFFSET,
56 	CRTC5_REGISTER_OFFSET
57 };
58 
59 static const uint32_t dig_offsets[] = {
60 	CRTC0_REGISTER_OFFSET,
61 	CRTC1_REGISTER_OFFSET,
62 	CRTC2_REGISTER_OFFSET,
63 	CRTC3_REGISTER_OFFSET,
64 	CRTC4_REGISTER_OFFSET,
65 	CRTC5_REGISTER_OFFSET,
66 	(0x13830 - 0x7030) >> 2,
67 };
68 
69 static const struct {
70 	uint32_t	reg;
71 	uint32_t	vblank;
72 	uint32_t	vline;
73 	uint32_t	hpd;
74 
75 } interrupt_status_offsets[6] = { {
76 	.reg = mmDISP_INTERRUPT_STATUS,
77 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
78 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
79 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
80 }, {
81 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
82 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
83 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
84 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
85 }, {
86 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
87 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
88 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
89 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
90 }, {
91 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
92 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
93 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
94 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
95 }, {
96 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
97 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
98 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
99 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
100 }, {
101 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
102 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
103 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
104 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
105 } };
106 
107 static const uint32_t hpd_int_control_offsets[6] = {
108 	mmDC_HPD1_INT_CONTROL,
109 	mmDC_HPD2_INT_CONTROL,
110 	mmDC_HPD3_INT_CONTROL,
111 	mmDC_HPD4_INT_CONTROL,
112 	mmDC_HPD5_INT_CONTROL,
113 	mmDC_HPD6_INT_CONTROL,
114 };
115 
116 static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
117 				     u32 block_offset, u32 reg)
118 {
119 	unsigned long flags;
120 	u32 r;
121 
122 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
123 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
124 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
125 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
126 
127 	return r;
128 }
129 
130 static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
131 				      u32 block_offset, u32 reg, u32 v)
132 {
133 	unsigned long flags;
134 
135 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
136 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
137 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
138 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
139 }
140 
141 static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
142 {
143 	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
144 			CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
145 		return true;
146 	else
147 		return false;
148 }
149 
150 static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
151 {
152 	u32 pos1, pos2;
153 
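	/* read the scanout position twice; if the two reads differ, the counter is still advancing */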
154 	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
155 	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
156 
157 	if (pos1 != pos2)
158 		return true;
159 	else
160 		return false;
161 }
162 
163 /**
164  * dce_v8_0_vblank_wait - vblank wait asic callback.
165  *
166  * @adev: amdgpu_device pointer
167  * @crtc: crtc to wait for vblank on
168  *
169  * Wait for vblank on the requested crtc (evergreen+).
170  */
171 static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
172 {
173 	unsigned i = 0;
174 
175 	if (crtc >= adev->mode_info.num_crtc)
176 		return;
177 
178 	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
179 		return;
180 
181 	/* depending on when we hit vblank, we may be close to active; if so,
182 	 * wait for another frame.
183 	 */
184 	while (dce_v8_0_is_in_vblank(adev, crtc)) {
185 		if (i++ % 100 == 0) {
186 			if (!dce_v8_0_is_counter_moving(adev, crtc))
187 				break;
188 		}
189 	}
190 
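	/* now wait for the next vblank period to begin, again bailing out if the position counter stops moving */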
191 	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
192 		if (i++ % 100 == 0) {
193 			if (!dce_v8_0_is_counter_moving(adev, crtc))
194 				break;
195 		}
196 	}
197 }
198 
199 static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
200 {
201 	if (crtc >= adev->mode_info.num_crtc)
202 		return 0;
203 	else
204 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
205 }
206 
207 /**
208  * dce_v8_0_page_flip - pageflip callback.
209  *
210  * @adev: amdgpu_device pointer
211  * @crtc_id: crtc to flip on
212  * @crtc_base: new address of the crtc (GPU MC address)
213  *
214  * Does the actual pageflip (evergreen+).
215  * We take the graphics update lock, program the new scanout addresses,
216  * then wait for the update_pending bit to go high. Once it does, we
217  * release the lock so the double buffered update can take place
218  * during vblank.
219  */
220 static void dce_v8_0_page_flip(struct amdgpu_device *adev,
221 			      int crtc_id, u64 crtc_base)
222 {
223 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
224 	u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
225 	int i;
226 
227 	/* Lock the graphics update lock */
228 	tmp |= GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
229 	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
230 
231 	/* update the scanout addresses */
232 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
233 	       upper_32_bits(crtc_base));
234 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
235 	       (u32)crtc_base);
236 
237 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
238 	       upper_32_bits(crtc_base));
239 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
240 	       (u32)crtc_base);
241 
242 	/* Wait for update_pending to go high. */
243 	for (i = 0; i < adev->usec_timeout; i++) {
244 		if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
245 				GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
246 			break;
247 		udelay(1);
248 	}
249 	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
250 
251 	/* Unlock the lock, so double-buffering can take place inside vblank */
252 	tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
253 	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
254 }
255 
256 static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257 					u32 *vbl, u32 *position)
258 {
259 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
260 		return -EINVAL;
261 
262 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
263 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
264 
265 	return 0;
266 }
267 
268 /**
269  * dce_v8_0_hpd_sense - hpd sense callback.
270  *
271  * @adev: amdgpu_device pointer
272  * @hpd: hpd (hotplug detect) pin
273  *
274  * Checks if a digital monitor is connected (evergreen+).
275  * Returns true if connected, false if not connected.
276  */
277 static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
278 			       enum amdgpu_hpd_id hpd)
279 {
280 	bool connected = false;
281 
282 	switch (hpd) {
283 	case AMDGPU_HPD_1:
284 		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
285 			connected = true;
286 		break;
287 	case AMDGPU_HPD_2:
288 		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
289 			connected = true;
290 		break;
291 	case AMDGPU_HPD_3:
292 		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
293 			connected = true;
294 		break;
295 	case AMDGPU_HPD_4:
296 		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
297 			connected = true;
298 		break;
299 	case AMDGPU_HPD_5:
300 		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
301 			connected = true;
302 		break;
303 	case AMDGPU_HPD_6:
304 		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
305 			connected = true;
306 		break;
307 	default:
308 		break;
309 	}
310 
311 	return connected;
312 }
313 
314 /**
315  * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
316  *
317  * @adev: amdgpu_device pointer
318  * @hpd: hpd (hotplug detect) pin
319  *
320  * Set the polarity of the hpd pin (evergreen+).
321  */
322 static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
323 				      enum amdgpu_hpd_id hpd)
324 {
325 	u32 tmp;
326 	bool connected = dce_v8_0_hpd_sense(adev, hpd);
327 
328 	switch (hpd) {
329 	case AMDGPU_HPD_1:
330 		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
331 		if (connected)
332 			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
333 		else
334 			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
335 		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
336 		break;
337 	case AMDGPU_HPD_2:
338 		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
339 		if (connected)
340 			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
341 		else
342 			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
343 		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
344 		break;
345 	case AMDGPU_HPD_3:
346 		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
347 		if (connected)
348 			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
349 		else
350 			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
351 		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
352 		break;
353 	case AMDGPU_HPD_4:
354 		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
355 		if (connected)
356 			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
357 		else
358 			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
359 		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
360 		break;
361 	case AMDGPU_HPD_5:
362 		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
363 		if (connected)
364 			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
365 		else
366 			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
367 		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
368 		break;
369 	case AMDGPU_HPD_6:
370 		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
371 		if (connected)
372 			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
373 		else
374 			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
375 		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
376 		break;
377 	default:
378 		break;
379 	}
380 }
381 
382 /**
383  * dce_v8_0_hpd_init - hpd setup callback.
384  *
385  * @adev: amdgpu_device pointer
386  *
387  * Setup the hpd pins used by the card (evergreen+).
388  * Enable the pin, set the polarity, and enable the hpd interrupts.
389  */
390 static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
391 {
392 	struct drm_device *dev = adev->ddev;
393 	struct drm_connector *connector;
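	/* common DC_HPDx_CONTROL value: connection timer (0x9c4), RX interrupt timer (0xfa) and pin enable */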
394 	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
395 		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
396 		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
397 
398 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
399 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
400 
401 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
402 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
403 			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
404 			 * aux dp channel on iMacs; this helps (but does not completely fix)
405 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
406 			 * and also avoids interrupt storms during dpms.
407 			 */
408 			continue;
409 		}
410 		switch (amdgpu_connector->hpd.hpd) {
411 		case AMDGPU_HPD_1:
412 			WREG32(mmDC_HPD1_CONTROL, tmp);
413 			break;
414 		case AMDGPU_HPD_2:
415 			WREG32(mmDC_HPD2_CONTROL, tmp);
416 			break;
417 		case AMDGPU_HPD_3:
418 			WREG32(mmDC_HPD3_CONTROL, tmp);
419 			break;
420 		case AMDGPU_HPD_4:
421 			WREG32(mmDC_HPD4_CONTROL, tmp);
422 			break;
423 		case AMDGPU_HPD_5:
424 			WREG32(mmDC_HPD5_CONTROL, tmp);
425 			break;
426 		case AMDGPU_HPD_6:
427 			WREG32(mmDC_HPD6_CONTROL, tmp);
428 			break;
429 		default:
430 			break;
431 		}
432 		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
433 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
434 	}
435 }
436 
437 /**
438  * dce_v8_0_hpd_fini - hpd tear down callback.
439  *
440  * @adev: amdgpu_device pointer
441  *
442  * Tear down the hpd pins used by the card (evergreen+).
443  * Disable the hpd interrupts.
444  */
445 static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
446 {
447 	struct drm_device *dev = adev->ddev;
448 	struct drm_connector *connector;
449 
450 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
451 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
452 
453 		switch (amdgpu_connector->hpd.hpd) {
454 		case AMDGPU_HPD_1:
455 			WREG32(mmDC_HPD1_CONTROL, 0);
456 			break;
457 		case AMDGPU_HPD_2:
458 			WREG32(mmDC_HPD2_CONTROL, 0);
459 			break;
460 		case AMDGPU_HPD_3:
461 			WREG32(mmDC_HPD3_CONTROL, 0);
462 			break;
463 		case AMDGPU_HPD_4:
464 			WREG32(mmDC_HPD4_CONTROL, 0);
465 			break;
466 		case AMDGPU_HPD_5:
467 			WREG32(mmDC_HPD5_CONTROL, 0);
468 			break;
469 		case AMDGPU_HPD_6:
470 			WREG32(mmDC_HPD6_CONTROL, 0);
471 			break;
472 		default:
473 			break;
474 		}
475 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
476 	}
477 }
478 
479 static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
480 {
481 	return mmDC_GPIO_HPD_A;
482 }
483 
484 static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
485 {
486 	u32 crtc_hung = 0;
487 	u32 crtc_status[6];
488 	u32 i, j, tmp;
489 
490 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
491 		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
492 			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
493 			crtc_hung |= (1 << i);
494 		}
495 	}
496 
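	/* re-sample up to 10 times, 100us apart; a crtc whose HV counter changes is not hung */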
497 	for (j = 0; j < 10; j++) {
498 		for (i = 0; i < adev->mode_info.num_crtc; i++) {
499 			if (crtc_hung & (1 << i)) {
500 				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
501 				if (tmp != crtc_status[i])
502 					crtc_hung &= ~(1 << i);
503 			}
504 		}
505 		if (crtc_hung == 0)
506 			return false;
507 		udelay(100);
508 	}
509 
510 	return true;
511 }
512 
513 static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
514 				    struct amdgpu_mode_mc_save *save)
515 {
516 	u32 crtc_enabled, tmp;
517 	int i;
518 
519 	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
520 	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
521 
522 	/* disable VGA render */
523 	tmp = RREG32(mmVGA_RENDER_CONTROL);
524 	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
525 	WREG32(mmVGA_RENDER_CONTROL, tmp);
526 
527 	/* blank the display controllers */
528 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
529 		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
530 					     CRTC_CONTROL, CRTC_MASTER_EN);
531 		if (crtc_enabled) {
532 #if 0
533 			u32 frame_count;
534 			int j;
535 
536 			save->crtc_enabled[i] = true;
537 			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
538 			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
539 				amdgpu_display_vblank_wait(adev, i);
540 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
541 				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
542 				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
543 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
544 			}
545 			/* wait for the next frame */
546 			frame_count = amdgpu_display_vblank_get_counter(adev, i);
547 			for (j = 0; j < adev->usec_timeout; j++) {
548 				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
549 					break;
550 				udelay(1);
551 			}
552 			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
553 			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
554 				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
555 				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
556 			}
557 			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
558 			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
559 				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
560 				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
561 			}
562 #else
563 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
564 			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
565 			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
566 			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
567 			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
568 			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
569 			save->crtc_enabled[i] = false;
570 			/* ***** */
571 #endif
572 		} else {
573 			save->crtc_enabled[i] = false;
574 		}
575 	}
576 }
577 
578 static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
579 				      struct amdgpu_mode_mc_save *save)
580 {
581 	u32 tmp, frame_count;
582 	int i, j;
583 
584 	/* update crtc base addresses */
585 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
586 		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
587 		       upper_32_bits(adev->mc.vram_start));
588 		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
589 		       upper_32_bits(adev->mc.vram_start));
590 		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
591 		       (u32)adev->mc.vram_start);
592 		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
593 		       (u32)adev->mc.vram_start);
594 
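		/* for crtcs that were enabled, restore double buffered update mode, release the update locks and unblank the display */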
595 		if (save->crtc_enabled[i]) {
596 			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
597 			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
598 				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
599 				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
600 			}
601 			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
602 			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
603 				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
604 				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
605 			}
606 			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
607 			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
608 				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
609 				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
610 			}
611 			for (j = 0; j < adev->usec_timeout; j++) {
612 				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
613 				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
614 					break;
615 				udelay(1);
616 			}
617 			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
618 			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
619 			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
620 			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
621 			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
622 			/* wait for the next frame */
623 			frame_count = amdgpu_display_vblank_get_counter(adev, i);
624 			for (j = 0; j < adev->usec_timeout; j++) {
625 				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
626 					break;
627 				udelay(1);
628 			}
629 		}
630 	}
631 
632 	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
633 	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
634 
635 	/* Unlock vga access */
636 	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
637 	mdelay(1);
638 	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
639 }
640 
641 static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
642 					  bool render)
643 {
644 	u32 tmp;
645 
646 	/* Lockout access through VGA aperture*/
647 	tmp = RREG32(mmVGA_HDP_CONTROL);
648 	if (render)
649 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
650 	else
651 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
652 	WREG32(mmVGA_HDP_CONTROL, tmp);
653 
654 	/* disable VGA render */
655 	tmp = RREG32(mmVGA_RENDER_CONTROL);
656 	if (render)
657 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
658 	else
659 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
660 	WREG32(mmVGA_RENDER_CONTROL, tmp);
661 }
662 
663 static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
664 {
665 	struct drm_device *dev = encoder->dev;
666 	struct amdgpu_device *adev = dev->dev_private;
667 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
668 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
669 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
670 	int bpc = 0;
671 	u32 tmp = 0;
672 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
673 
674 	if (connector) {
675 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
676 		bpc = amdgpu_connector_get_monitor_bpc(connector);
677 		dither = amdgpu_connector->dither;
678 	}
679 
680 	/* LVDS/eDP FMT is set up by atom */
681 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
682 		return;
683 
684 	/* not needed for analog */
685 	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
686 	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
687 		return;
688 
689 	if (bpc == 0)
690 		return;
691 
692 	switch (bpc) {
693 	case 6:
694 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
695 			/* XXX sort out optimal dither settings */
696 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
697 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
698 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
699 				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
700 		else
701 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
702 			(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
703 		break;
704 	case 8:
705 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
706 			/* XXX sort out optimal dither settings */
707 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
708 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
709 				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
710 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
711 				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
712 		else
713 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
714 			(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
715 		break;
716 	case 10:
717 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
718 			/* XXX sort out optimal dither settings */
719 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
720 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
721 				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
722 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
723 				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
724 		else
725 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
726 			(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
727 		break;
728 	default:
729 		/* not needed */
730 		break;
731 	}
732 
733 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
734 }
735 
736 
737 /* display watermark setup */
738 /**
739  * dce_v8_0_line_buffer_adjust - Set up the line buffer
740  *
741  * @adev: amdgpu_device pointer
742  * @amdgpu_crtc: the selected display controller
743  * @mode: the current display mode on the selected display
744  * controller
745  *
746  * Set up the line buffer allocation for
747  * the selected display controller (CIK).
748  * Returns the line buffer size in pixels.
749  */
750 static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
751 				       struct amdgpu_crtc *amdgpu_crtc,
752 				       struct drm_display_mode *mode)
753 {
754 	u32 tmp, buffer_alloc, i;
755 	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
756 	/*
757 	 * Line Buffer Setup
758 	 * There are 6 line buffers, one for each display controller.
759 	 * There are 3 partitions per LB. Select the number of partitions
760 	 * to enable based on the display width.  For display widths larger
761 	 * than 4096, you need to use 2 display controllers and combine
762 	 * them using the stereo blender.
763 	 */
764 	if (amdgpu_crtc->base.enabled && mode) {
765 		if (mode->crtc_hdisplay < 1920) {
766 			tmp = 1;
767 			buffer_alloc = 2;
768 		} else if (mode->crtc_hdisplay < 2560) {
769 			tmp = 2;
770 			buffer_alloc = 2;
771 		} else if (mode->crtc_hdisplay < 4096) {
772 			tmp = 0;
773 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
774 		} else {
775 			DRM_DEBUG_KMS("Mode too big for LB!\n");
776 			tmp = 0;
777 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
778 		}
779 	} else {
780 		tmp = 1;
781 		buffer_alloc = 0;
782 	}
783 
784 	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
785 	      (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
786 	      (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
787 
788 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
789 	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
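	/* wait for the DMIF buffer allocation to complete */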
790 	for (i = 0; i < adev->usec_timeout; i++) {
791 		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
792 		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
793 			break;
794 		udelay(1);
795 	}
796 
797 	if (amdgpu_crtc->base.enabled && mode) {
798 		switch (tmp) {
799 		case 0:
800 		default:
801 			return 4096 * 2;
802 		case 1:
803 			return 1920 * 2;
804 		case 2:
805 			return 2560 * 2;
806 		}
807 	}
808 
809 	/* controller not enabled, so no lb used */
810 	return 0;
811 }
812 
813 /**
814  * cik_get_number_of_dram_channels - get the number of dram channels
815  *
816  * @adev: amdgpu_device pointer
817  *
818  * Look up the number of video ram channels (CIK).
819  * Used for display watermark bandwidth calculations
820  * Returns the number of dram channels
821  */
822 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
823 {
824 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
825 
826 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
827 	case 0:
828 	default:
829 		return 1;
830 	case 1:
831 		return 2;
832 	case 2:
833 		return 4;
834 	case 3:
835 		return 8;
836 	case 4:
837 		return 3;
838 	case 5:
839 		return 6;
840 	case 6:
841 		return 10;
842 	case 7:
843 		return 12;
844 	case 8:
845 		return 16;
846 	}
847 }
848 
849 struct dce8_wm_params {
850 	u32 dram_channels; /* number of dram channels */
851 	u32 yclk;          /* bandwidth per dram data pin in kHz */
852 	u32 sclk;          /* engine clock in kHz */
853 	u32 disp_clk;      /* display clock in kHz */
854 	u32 src_width;     /* viewport width */
855 	u32 active_time;   /* active display time in ns */
856 	u32 blank_time;    /* blank time in ns */
857 	bool interlaced;    /* mode is interlaced */
858 	fixed20_12 vsc;    /* vertical scale ratio */
859 	u32 num_heads;     /* number of active crtcs */
860 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
861 	u32 lb_size;       /* line buffer allocated to pipe */
862 	u32 vtaps;         /* vertical scaler taps */
863 };
864 
865 /**
866  * dce_v8_0_dram_bandwidth - get the dram bandwidth
867  *
868  * @wm: watermark calculation data
869  *
870  * Calculate the raw dram bandwidth (CIK).
871  * Used for display watermark bandwidth calculations
872  * Returns the dram bandwidth in MBytes/s
873  */
874 static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
875 {
876 	/* Calculate raw DRAM Bandwidth */
877 	fixed20_12 dram_efficiency; /* 0.7 */
878 	fixed20_12 yclk, dram_channels, bandwidth;
879 	fixed20_12 a;
880 
881 	a.full = dfixed_const(1000);
882 	yclk.full = dfixed_const(wm->yclk);
883 	yclk.full = dfixed_div(yclk, a);
884 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
885 	a.full = dfixed_const(10);
886 	dram_efficiency.full = dfixed_const(7);
887 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
888 	bandwidth.full = dfixed_mul(dram_channels, yclk);
889 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
890 
891 	return dfixed_trunc(bandwidth);
892 }
893 
894 /**
895  * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
896  *
897  * @wm: watermark calculation data
898  *
899  * Calculate the dram bandwidth used for display (CIK).
900  * Used for display watermark bandwidth calculations
901  * Returns the dram bandwidth for display in MBytes/s
902  */
903 static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
904 {
905 	/* Calculate DRAM Bandwidth and the part allocated to display. */
906 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
907 	fixed20_12 yclk, dram_channels, bandwidth;
908 	fixed20_12 a;
909 
910 	a.full = dfixed_const(1000);
911 	yclk.full = dfixed_const(wm->yclk);
912 	yclk.full = dfixed_div(yclk, a);
913 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
914 	a.full = dfixed_const(10);
915 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
916 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
917 	bandwidth.full = dfixed_mul(dram_channels, yclk);
918 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
919 
920 	return dfixed_trunc(bandwidth);
921 }
922 
923 /**
924  * dce_v8_0_data_return_bandwidth - get the data return bandwidth
925  *
926  * @wm: watermark calculation data
927  *
928  * Calculate the data return bandwidth used for display (CIK).
929  * Used for display watermark bandwidth calculations
930  * Returns the data return bandwidth in MBytes/s
931  */
932 static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
933 {
934 	/* Calculate the display Data return Bandwidth */
935 	fixed20_12 return_efficiency; /* 0.8 */
936 	fixed20_12 sclk, bandwidth;
937 	fixed20_12 a;
938 
939 	a.full = dfixed_const(1000);
940 	sclk.full = dfixed_const(wm->sclk);
941 	sclk.full = dfixed_div(sclk, a);
942 	a.full = dfixed_const(10);
943 	return_efficiency.full = dfixed_const(8);
944 	return_efficiency.full = dfixed_div(return_efficiency, a);
945 	a.full = dfixed_const(32);
946 	bandwidth.full = dfixed_mul(a, sclk);
947 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
948 
949 	return dfixed_trunc(bandwidth);
950 }
951 
952 /**
953  * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
954  *
955  * @wm: watermark calculation data
956  *
957  * Calculate the dmif bandwidth used for display (CIK).
958  * Used for display watermark bandwidth calculations
959  * Returns the dmif bandwidth in MBytes/s
960  */
961 static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
962 {
963 	/* Calculate the DMIF Request Bandwidth */
964 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
965 	fixed20_12 disp_clk, bandwidth;
966 	fixed20_12 a, b;
967 
968 	a.full = dfixed_const(1000);
969 	disp_clk.full = dfixed_const(wm->disp_clk);
970 	disp_clk.full = dfixed_div(disp_clk, a);
971 	a.full = dfixed_const(32);
972 	b.full = dfixed_mul(a, disp_clk);
973 
974 	a.full = dfixed_const(10);
975 	disp_clk_request_efficiency.full = dfixed_const(8);
976 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
977 
978 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
979 
980 	return dfixed_trunc(bandwidth);
981 }
982 
983 /**
984  * dce_v8_0_available_bandwidth - get the min available bandwidth
985  *
986  * @wm: watermark calculation data
987  *
988  * Calculate the min available bandwidth used for display (CIK).
989  * Used for display watermark bandwidth calculations
990  * Returns the min available bandwidth in MBytes/s
991  */
992 static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
993 {
994 	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
995 	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
996 	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
997 	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
998 
999 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
1000 }
1001 
1002 /**
1003  * dce_v8_0_average_bandwidth - get the average available bandwidth
1004  *
1005  * @wm: watermark calculation data
1006  *
1007  * Calculate the average available bandwidth used for display (CIK).
1008  * Used for display watermark bandwidth calculations
1009  * Returns the average available bandwidth in MBytes/s
1010  */
1011 static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
1012 {
1013 	/* Calculate the display mode Average Bandwidth
1014 	 * DisplayMode should contain the source and destination dimensions,
1015 	 * timing, etc.
1016 	 */
1017 	fixed20_12 bpp;
1018 	fixed20_12 line_time;
1019 	fixed20_12 src_width;
1020 	fixed20_12 bandwidth;
1021 	fixed20_12 a;
1022 
1023 	a.full = dfixed_const(1000);
1024 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
1025 	line_time.full = dfixed_div(line_time, a);
1026 	bpp.full = dfixed_const(wm->bytes_per_pixel);
1027 	src_width.full = dfixed_const(wm->src_width);
1028 	bandwidth.full = dfixed_mul(src_width, bpp);
1029 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
1030 	bandwidth.full = dfixed_div(bandwidth, line_time);
1031 
1032 	return dfixed_trunc(bandwidth);
1033 }
1034 
1035 /**
1036  * dce_v8_0_latency_watermark - get the latency watermark
1037  *
1038  * @wm: watermark calculation data
1039  *
1040  * Calculate the latency watermark (CIK).
1041  * Used for display watermark bandwidth calculations
1042  * Returns the latency watermark in ns
1043  */
1044 static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
1045 {
1046 	/* First calculate the latency in ns */
1047 	u32 mc_latency = 2000; /* 2000 ns. */
1048 	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
1049 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
1050 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
1051 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
1052 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
1053 		(wm->num_heads * cursor_line_pair_return_time);
1054 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
1055 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
1056 	u32 tmp, dmif_size = 12288;
1057 	fixed20_12 a, b, c;
1058 
1059 	if (wm->num_heads == 0)
1060 		return 0;
1061 
1062 	a.full = dfixed_const(2);
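	/* determine how many source lines are needed per destination line, based on the vertical scale ratio, tap count and interlacing */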
1063 	b.full = dfixed_const(1);
1064 	if ((wm->vsc.full > a.full) ||
1065 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
1066 	    (wm->vtaps >= 5) ||
1067 	    ((wm->vsc.full >= a.full) && wm->interlaced))
1068 		max_src_lines_per_dst_line = 4;
1069 	else
1070 		max_src_lines_per_dst_line = 2;
1071 
1072 	a.full = dfixed_const(available_bandwidth);
1073 	b.full = dfixed_const(wm->num_heads);
1074 	a.full = dfixed_div(a, b);
1075 
1076 	b.full = dfixed_const(mc_latency + 512);
1077 	c.full = dfixed_const(wm->disp_clk);
1078 	b.full = dfixed_div(b, c);
1079 
1080 	c.full = dfixed_const(dmif_size);
1081 	b.full = dfixed_div(c, b);
1082 
1083 	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1084 
1085 	b.full = dfixed_const(1000);
1086 	c.full = dfixed_const(wm->disp_clk);
1087 	b.full = dfixed_div(c, b);
1088 	c.full = dfixed_const(wm->bytes_per_pixel);
1089 	b.full = dfixed_mul(b, c);
1090 
1091 	lb_fill_bw = min(tmp, dfixed_trunc(b));
1092 
1093 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1094 	b.full = dfixed_const(1000);
1095 	c.full = dfixed_const(lb_fill_bw);
1096 	b.full = dfixed_div(c, b);
1097 	a.full = dfixed_div(a, b);
1098 	line_fill_time = dfixed_trunc(a);
1099 
1100 	if (line_fill_time < wm->active_time)
1101 		return latency;
1102 	else
1103 		return latency + (line_fill_time - wm->active_time);
1104 
1105 }
1106 
1107 /**
1108  * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
1109  * average and available dram bandwidth
1110  *
1111  * @wm: watermark calculation data
1112  *
1113  * Check if the display average bandwidth fits in the display
1114  * dram bandwidth (CIK).
1115  * Used for display watermark bandwidth calculations
1116  * Returns true if the display fits, false if not.
1117  */
1118 static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
1119 {
1120 	if (dce_v8_0_average_bandwidth(wm) <=
1121 	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
1122 		return true;
1123 	else
1124 		return false;
1125 }
1126 
1127 /**
1128  * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
1129  * average and available bandwidth
1130  *
1131  * @wm: watermark calculation data
1132  *
1133  * Check if the display average bandwidth fits in the display
1134  * available bandwidth (CIK).
1135  * Used for display watermark bandwidth calculations
1136  * Returns true if the display fits, false if not.
1137  */
1138 static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
1139 {
1140 	if (dce_v8_0_average_bandwidth(wm) <=
1141 	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
1142 		return true;
1143 	else
1144 		return false;
1145 }
1146 
1147 /**
1148  * dce_v8_0_check_latency_hiding - check latency hiding
1149  *
1150  * @wm: watermark calculation data
1151  *
1152  * Check latency hiding (CIK).
1153  * Used for display watermark bandwidth calculations
1154  * Returns true if the display fits, false if not.
1155  */
1156 static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
1157 {
1158 	u32 lb_partitions = wm->lb_size / wm->src_width;
1159 	u32 line_time = wm->active_time + wm->blank_time;
1160 	u32 latency_tolerant_lines;
1161 	u32 latency_hiding;
1162 	fixed20_12 a;
1163 
1164 	a.full = dfixed_const(1);
1165 	if (wm->vsc.full > a.full)
1166 		latency_tolerant_lines = 1;
1167 	else {
1168 		if (lb_partitions <= (wm->vtaps + 1))
1169 			latency_tolerant_lines = 1;
1170 		else
1171 			latency_tolerant_lines = 2;
1172 	}
1173 
1174 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1175 
1176 	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
1177 		return true;
1178 	else
1179 		return false;
1180 }
1181 
1182 /**
1183  * dce_v8_0_program_watermarks - program display watermarks
1184  *
1185  * @adev: amdgpu_device pointer
1186  * @amdgpu_crtc: the selected display controller
1187  * @lb_size: line buffer size
1188  * @num_heads: number of display controllers in use
1189  *
1190  * Calculate and program the display watermarks for the
1191  * selected display controller (CIK).
1192  */
1193 static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1194 					struct amdgpu_crtc *amdgpu_crtc,
1195 					u32 lb_size, u32 num_heads)
1196 {
1197 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1198 	struct dce8_wm_params wm_low, wm_high;
1199 	u32 pixel_period;
1200 	u32 line_time = 0;
1201 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
1202 	u32 tmp, wm_mask;
1203 
1204 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
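		/* mode->clock is in kHz, so pixel_period and line_time are in ns; line_time is clamped to 16 bits for the watermark registers */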
1205 		pixel_period = 1000000 / (u32)mode->clock;
1206 		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1207 
1208 		/* watermark for high clocks */
1209 		if (adev->pm.dpm_enabled) {
1210 			wm_high.yclk =
1211 				amdgpu_dpm_get_mclk(adev, false) * 10;
1212 			wm_high.sclk =
1213 				amdgpu_dpm_get_sclk(adev, false) * 10;
1214 		} else {
1215 			wm_high.yclk = adev->pm.current_mclk * 10;
1216 			wm_high.sclk = adev->pm.current_sclk * 10;
1217 		}
1218 
1219 		wm_high.disp_clk = mode->clock;
1220 		wm_high.src_width = mode->crtc_hdisplay;
1221 		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1222 		wm_high.blank_time = line_time - wm_high.active_time;
1223 		wm_high.interlaced = false;
1224 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1225 			wm_high.interlaced = true;
1226 		wm_high.vsc = amdgpu_crtc->vsc;
1227 		wm_high.vtaps = 1;
1228 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1229 			wm_high.vtaps = 2;
1230 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1231 		wm_high.lb_size = lb_size;
1232 		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1233 		wm_high.num_heads = num_heads;
1234 
1235 		/* set for high clocks */
1236 		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
1237 
1238 		/* possibly force display priority to high */
1239 		/* should really do this at mode validation time... */
1240 		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1241 		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1242 		    !dce_v8_0_check_latency_hiding(&wm_high) ||
1243 		    (adev->mode_info.disp_priority == 2)) {
1244 			DRM_DEBUG_KMS("force priority to high\n");
1245 		}
1246 
1247 		/* watermark for low clocks */
1248 		if (adev->pm.dpm_enabled) {
1249 			wm_low.yclk =
1250 				amdgpu_dpm_get_mclk(adev, true) * 10;
1251 			wm_low.sclk =
1252 				amdgpu_dpm_get_sclk(adev, true) * 10;
1253 		} else {
1254 			wm_low.yclk = adev->pm.current_mclk * 10;
1255 			wm_low.sclk = adev->pm.current_sclk * 10;
1256 		}
1257 
1258 		wm_low.disp_clk = mode->clock;
1259 		wm_low.src_width = mode->crtc_hdisplay;
1260 		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1261 		wm_low.blank_time = line_time - wm_low.active_time;
1262 		wm_low.interlaced = false;
1263 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1264 			wm_low.interlaced = true;
1265 		wm_low.vsc = amdgpu_crtc->vsc;
1266 		wm_low.vtaps = 1;
1267 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1268 			wm_low.vtaps = 2;
1269 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1270 		wm_low.lb_size = lb_size;
1271 		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1272 		wm_low.num_heads = num_heads;
1273 
1274 		/* set for low clocks */
1275 		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
1276 
1277 		/* possibly force display priority to high */
1278 		/* should really do this at mode validation time... */
1279 		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1280 		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1281 		    !dce_v8_0_check_latency_hiding(&wm_low) ||
1282 		    (adev->mode_info.disp_priority == 2)) {
1283 			DRM_DEBUG_KMS("force priority to high\n");
1284 		}
1285 	}
1286 
1287 	/* select wm A */
1288 	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1289 	tmp = wm_mask;
1290 	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1291 	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1292 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1293 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1294 	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1295 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1296 	/* select wm B */
1297 	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1298 	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1299 	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1300 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1301 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1302 	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1303 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1304 	/* restore original selection */
1305 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1306 
1307 	/* save values for DPM */
1308 	amdgpu_crtc->line_time = line_time;
1309 	amdgpu_crtc->wm_high = latency_watermark_a;
1310 	amdgpu_crtc->wm_low = latency_watermark_b;
1311 }
1312 
1313 /**
1314  * dce_v8_0_bandwidth_update - program display watermarks
1315  *
1316  * @adev: amdgpu_device pointer
1317  *
1318  * Calculate and program the display watermarks and line
1319  * buffer allocation (CIK).
1320  */
1321 static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1322 {
1323 	struct drm_display_mode *mode = NULL;
1324 	u32 num_heads = 0, lb_size;
1325 	int i;
1326 
1327 	amdgpu_update_display_priority(adev);
1328 
1329 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1330 		if (adev->mode_info.crtcs[i]->base.enabled)
1331 			num_heads++;
1332 	}
1333 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1334 		mode = &adev->mode_info.crtcs[i]->base.mode;
1335 		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1336 		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1337 					    lb_size, num_heads);
1338 	}
1339 }
1340 
1341 static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1342 {
1343 	int i;
1344 	u32 offset, tmp;
1345 
1346 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1347 		offset = adev->mode_info.audio.pin[i].offset;
1348 		tmp = RREG32_AUDIO_ENDPT(offset,
1349 					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1350 		if (((tmp &
1351 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1352 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1353 			adev->mode_info.audio.pin[i].connected = false;
1354 		else
1355 			adev->mode_info.audio.pin[i].connected = true;
1356 	}
1357 }
1358 
1359 static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1360 {
1361 	int i;
1362 
1363 	dce_v8_0_audio_get_connected_pins(adev);
1364 
1365 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1366 		if (adev->mode_info.audio.pin[i].connected)
1367 			return &adev->mode_info.audio.pin[i];
1368 	}
1369 	DRM_ERROR("No connected audio pins found!\n");
1370 	return NULL;
1371 }
1372 
1373 static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1374 {
1375 	struct amdgpu_device *adev = encoder->dev->dev_private;
1376 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1377 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1378 	u32 offset;
1379 
1380 	if (!dig || !dig->afmt || !dig->afmt->pin)
1381 		return;
1382 
1383 	offset = dig->afmt->offset;
1384 
1385 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1386 	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1387 }
1388 
1389 static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1390 						struct drm_display_mode *mode)
1391 {
1392 	struct amdgpu_device *adev = encoder->dev->dev_private;
1393 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1394 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1395 	struct drm_connector *connector;
1396 	struct amdgpu_connector *amdgpu_connector = NULL;
1397 	u32 tmp = 0, offset;
1398 
1399 	if (!dig || !dig->afmt || !dig->afmt->pin)
1400 		return;
1401 
1402 	offset = dig->afmt->pin->offset;
1403 
1404 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1405 		if (connector->encoder == encoder) {
1406 			amdgpu_connector = to_amdgpu_connector(connector);
1407 			break;
1408 		}
1409 	}
1410 
1411 	if (!amdgpu_connector) {
1412 		DRM_ERROR("Couldn't find encoder's connector\n");
1413 		return;
1414 	}
1415 
1416 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1417 		if (connector->latency_present[1])
1418 			tmp =
1419 			(connector->video_latency[1] <<
1420 			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1421 			(connector->audio_latency[1] <<
1422 			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1423 		else
1424 			tmp =
1425 			(0 <<
1426 			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1427 			(0 <<
1428 			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1429 	} else {
1430 		if (connector->latency_present[0])
1431 			tmp =
1432 			(connector->video_latency[0] <<
1433 			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1434 			(connector->audio_latency[0] <<
1435 			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1436 		else
1437 			tmp =
1438 			(0 <<
1439 			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1440 			(0 <<
1441 			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1442 
1443 	}
1444 	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1445 }
1446 
1447 static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1448 {
1449 	struct amdgpu_device *adev = encoder->dev->dev_private;
1450 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1451 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1452 	struct drm_connector *connector;
1453 	struct amdgpu_connector *amdgpu_connector = NULL;
1454 	u32 offset, tmp;
1455 	u8 *sadb = NULL;
1456 	int sad_count;
1457 
1458 	if (!dig || !dig->afmt || !dig->afmt->pin)
1459 		return;
1460 
1461 	offset = dig->afmt->pin->offset;
1462 
1463 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1464 		if (connector->encoder == encoder) {
1465 			amdgpu_connector = to_amdgpu_connector(connector);
1466 			break;
1467 		}
1468 	}
1469 
1470 	if (!amdgpu_connector) {
1471 		DRM_ERROR("Couldn't find encoder's connector\n");
1472 		return;
1473 	}
1474 
1475 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1476 	if (sad_count < 0) {
1477 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1478 		sad_count = 0;
1479 	}
1480 
1481 	/* program the speaker allocation */
1482 	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1483 	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1484 		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1485 	/* set HDMI mode */
1486 	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1487 	if (sad_count)
1488 		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1489 	else
1490 		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1491 	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1492 
1493 	kfree(sadb);
1494 }
1495 
1496 static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1497 {
1498 	struct amdgpu_device *adev = encoder->dev->dev_private;
1499 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1500 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1501 	u32 offset;
1502 	struct drm_connector *connector;
1503 	struct amdgpu_connector *amdgpu_connector = NULL;
1504 	struct cea_sad *sads;
1505 	int i, sad_count;
1506 
1507 	static const u16 eld_reg_to_type[][2] = {
1508 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1509 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1510 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1511 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1512 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1513 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1514 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1515 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1516 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1517 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1518 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1519 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1520 	};
1521 
1522 	if (!dig || !dig->afmt || !dig->afmt->pin)
1523 		return;
1524 
1525 	offset = dig->afmt->pin->offset;
1526 
1527 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1528 		if (connector->encoder == encoder) {
1529 			amdgpu_connector = to_amdgpu_connector(connector);
1530 			break;
1531 		}
1532 	}
1533 
1534 	if (!amdgpu_connector) {
1535 		DRM_ERROR("Couldn't find encoder's connector\n");
1536 		return;
1537 	}
1538 
1539 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1540 	if (sad_count <= 0) {
1541 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1542 		return;
1543 	}
1544 	BUG_ON(!sads);
1545 
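	/* for each descriptor register, program the SAD with the most channels for that format and collect the supported stereo PCM frequencies */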
1546 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1547 		u32 value = 0;
1548 		u8 stereo_freqs = 0;
1549 		int max_channels = -1;
1550 		int j;
1551 
1552 		for (j = 0; j < sad_count; j++) {
1553 			struct cea_sad *sad = &sads[j];
1554 
1555 			if (sad->format == eld_reg_to_type[i][1]) {
1556 				if (sad->channels > max_channels) {
1557 					value = (sad->channels <<
1558 						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1559 						(sad->byte2 <<
1560 						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1561 						(sad->freq <<
1562 						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1563 					max_channels = sad->channels;
1564 				}
1565 
1566 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1567 					stereo_freqs |= sad->freq;
1568 				else
1569 					break;
1570 			}
1571 		}
1572 
1573 		value |= (stereo_freqs <<
1574 			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1575 
1576 		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1577 	}
1578 
1579 	kfree(sads);
1580 }
1581 
1582 static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1583 				  struct amdgpu_audio_pin *pin,
1584 				  bool enable)
1585 {
1586 	if (!pin)
1587 		return;
1588 
1589 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1590 		enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1591 }
1592 
1593 static const u32 pin_offsets[7] =
1594 {
1595 	(0x1780 - 0x1780),
1596 	(0x1786 - 0x1780),
1597 	(0x178c - 0x1780),
1598 	(0x1792 - 0x1780),
1599 	(0x1798 - 0x1780),
1600 	(0x179d - 0x1780),
1601 	(0x17a4 - 0x1780),
1602 };
1603 
1604 static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1605 {
1606 	int i;
1607 
1608 	if (!amdgpu_audio)
1609 		return 0;
1610 
1611 	adev->mode_info.audio.enabled = true;
1612 
1613 	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1614 		adev->mode_info.audio.num_pins = 7;
1615 	else if ((adev->asic_type == CHIP_KABINI) ||
1616 		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1617 		adev->mode_info.audio.num_pins = 3;
1618 	else if ((adev->asic_type == CHIP_BONAIRE) ||
1619 		 (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1620 		adev->mode_info.audio.num_pins = 7;
1621 	else
1622 		adev->mode_info.audio.num_pins = 3;
1623 
1624 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1625 		adev->mode_info.audio.pin[i].channels = -1;
1626 		adev->mode_info.audio.pin[i].rate = -1;
1627 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1628 		adev->mode_info.audio.pin[i].status_bits = 0;
1629 		adev->mode_info.audio.pin[i].category_code = 0;
1630 		adev->mode_info.audio.pin[i].connected = false;
1631 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1632 		adev->mode_info.audio.pin[i].id = i;
1633 		/* disable audio.  it will be set up later */
1634 		/* XXX remove once we switch to ip funcs */
1635 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1636 	}
1637 
1638 	return 0;
1639 }
1640 
1641 static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1642 {
1643 	int i;
1644 
1645 	if (!adev->mode_info.audio.enabled)
1646 		return;
1647 
1648 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1649 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1650 
1651 	adev->mode_info.audio.enabled = false;
1652 }
1653 
1654 /*
1655  * update the N and CTS parameters for a given pixel clock rate
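 * (the sink recovers the audio clock from these: 128*fs = f_TMDS * N / CTS per the HDMI spec)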
1656  */
1657 static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1658 {
1659 	struct drm_device *dev = encoder->dev;
1660 	struct amdgpu_device *adev = dev->dev_private;
1661 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1662 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1663 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1664 	uint32_t offset = dig->afmt->offset;
1665 
1666 	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1667 	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1668 
1669 	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1670 	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1671 
1672 	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1673 	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1674 }
1675 
1676 /*
1677  * build a HDMI Video Info Frame
1678  */
1679 static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1680 					       void *buffer, size_t size)
1681 {
1682 	struct drm_device *dev = encoder->dev;
1683 	struct amdgpu_device *adev = dev->dev_private;
1684 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1685 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1686 	uint32_t offset = dig->afmt->offset;
1687 	uint8_t *frame = buffer + 3;
1688 	uint8_t *header = buffer;
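	/* the packed infoframe is 3 header bytes followed by checksum + payload (frame);
	 * AVI_INFO3 also carries the header version byte (header[1]) in its top byte
	 */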
1689 
1690 	WREG32(mmAFMT_AVI_INFO0 + offset,
1691 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1692 	WREG32(mmAFMT_AVI_INFO1 + offset,
1693 		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1694 	WREG32(mmAFMT_AVI_INFO2 + offset,
1695 		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1696 	WREG32(mmAFMT_AVI_INFO3 + offset,
1697 		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1698 }
1699 
1700 static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1701 {
1702 	struct drm_device *dev = encoder->dev;
1703 	struct amdgpu_device *adev = dev->dev_private;
1704 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1705 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1706 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1707 	u32 dto_phase = 24 * 1000;
1708 	u32 dto_modulo = clock;
1709 
1710 	if (!dig || !dig->afmt)
1711 		return;
1712 
1713 	/* XXX two dtos; generally use dto0 for hdmi */
1714 	/* Express [24MHz / target pixel clock] as an exact rational
1715 	 * number (ratio of two integers).  DCCG_AUDIO_DTOx_PHASE
1716 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1717 	 */
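	/* e.g. a 148500 kHz (148.5 MHz) pixel clock gives PHASE = 24000, MODULE = 148500 */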
1718 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1719 	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1720 	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1721 }
1722 
1723 /*
1724  * update the info frames with the data from the current display mode
1725  */
1726 static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1727 				  struct drm_display_mode *mode)
1728 {
1729 	struct drm_device *dev = encoder->dev;
1730 	struct amdgpu_device *adev = dev->dev_private;
1731 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1732 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1733 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1734 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1735 	struct hdmi_avi_infoframe frame;
1736 	uint32_t offset, val;
1737 	ssize_t err;
1738 	int bpc = 8;
1739 
1740 	if (!dig || !dig->afmt)
1741 		return;
1742 
1743 	/* Silent, r600_hdmi_enable will raise WARN for us */
1744 	if (!dig->afmt->enabled)
1745 		return;
1746 	offset = dig->afmt->offset;
1747 
1748 	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1749 	if (encoder->crtc) {
1750 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1751 		bpc = amdgpu_crtc->bpc;
1752 	}
1753 
1754 	/* disable audio prior to setting up hw */
1755 	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1756 	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1757 
1758 	dce_v8_0_audio_set_dto(encoder, mode->clock);
1759 
1760 	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1761 	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1762 
1763 	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1764 
1765 	val = RREG32(mmHDMI_CONTROL + offset);
1766 	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1767 	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1768 
1769 	switch (bpc) {
1770 	case 0:
1771 	case 6:
1772 	case 8:
1773 	case 16:
1774 	default:
1775 		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1776 			  connector->name, bpc);
1777 		break;
1778 	case 10:
1779 		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1780 		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1781 		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1782 			  connector->name);
1783 		break;
1784 	case 12:
1785 		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1786 		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1787 		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1788 			  connector->name);
1789 		break;
1790 	}
1791 
1792 	WREG32(mmHDMI_CONTROL + offset, val);
1793 
1794 	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1795 	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1796 	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1797 	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1798 
1799 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1800 	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1801 	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1802 
1803 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1804 	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1805 
1806 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1807 	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1808 
1809 	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1810 
1811 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1812 	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1813 	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1814 
1815 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1816 	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1817 
1818 	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1819 
1820 	if (bpc > 8)
1821 		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1822 		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1823 	else
1824 		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1825 		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1826 		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1827 
1828 	dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1829 
1830 	WREG32(mmAFMT_60958_0 + offset,
1831 	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1832 
1833 	WREG32(mmAFMT_60958_1 + offset,
1834 	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1835 
1836 	WREG32(mmAFMT_60958_2 + offset,
1837 	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1838 	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1839 	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1840 	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1841 	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1842 	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1843 
1844 	dce_v8_0_audio_write_speaker_allocation(encoder);
1845 
1846 
1847 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1848 	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1849 
1850 	dce_v8_0_afmt_audio_select_pin(encoder);
1851 	dce_v8_0_audio_write_sad_regs(encoder);
1852 	dce_v8_0_audio_write_latency_fields(encoder, mode);
1853 
1854 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1855 	if (err < 0) {
1856 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1857 		return;
1858 	}
1859 
1860 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1861 	if (err < 0) {
1862 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1863 		return;
1864 	}
1865 
1866 	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1867 
1868 	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1869 		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1870 		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for AVI info values to be updated */
1871 
1872 	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1873 		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1874 		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1875 
1876 	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1877 		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1878 
1879 	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
1880 	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1881 	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1882 	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1883 	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1884 
1885 	/* enable audio after setting up hw */
1886 	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1887 }
1888 
1889 static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1890 {
1891 	struct drm_device *dev = encoder->dev;
1892 	struct amdgpu_device *adev = dev->dev_private;
1893 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1894 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1895 
1896 	if (!dig || !dig->afmt)
1897 		return;
1898 
1899 	/* Silent, r600_hdmi_enable will raise WARN for us */
1900 	if (enable && dig->afmt->enabled)
1901 		return;
1902 	if (!enable && !dig->afmt->enabled)
1903 		return;
1904 
1905 	if (!enable && dig->afmt->pin) {
1906 		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1907 		dig->afmt->pin = NULL;
1908 	}
1909 
1910 	dig->afmt->enabled = enable;
1911 
1912 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1913 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1914 }
1915 
1916 static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
1917 {
1918 	int i;
1919 
1920 	for (i = 0; i < adev->mode_info.num_dig; i++)
1921 		adev->mode_info.afmt[i] = NULL;
1922 
1923 	/* DCE8 has audio blocks tied to DIG encoders */
1924 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1925 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1926 		if (adev->mode_info.afmt[i]) {
1927 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1928 			adev->mode_info.afmt[i]->id = i;
1929 		}
1930 	}
1931 }
1932 
1933 static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1934 {
1935 	int i;
1936 
1937 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1938 		kfree(adev->mode_info.afmt[i]);
1939 		adev->mode_info.afmt[i] = NULL;
1940 	}
1941 }
1942 
1943 static const u32 vga_control_regs[6] =
1944 {
1945 	mmD1VGA_CONTROL,
1946 	mmD2VGA_CONTROL,
1947 	mmD3VGA_CONTROL,
1948 	mmD4VGA_CONTROL,
1949 	mmD5VGA_CONTROL,
1950 	mmD6VGA_CONTROL,
1951 };
1952 
1953 static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1954 {
1955 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1956 	struct drm_device *dev = crtc->dev;
1957 	struct amdgpu_device *adev = dev->dev_private;
1958 	u32 vga_control;
1959 
1960 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1961 	if (enable)
1962 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1963 	else
1964 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1965 }
1966 
1967 static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1968 {
1969 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1970 	struct drm_device *dev = crtc->dev;
1971 	struct amdgpu_device *adev = dev->dev_private;
1972 
1973 	if (enable)
1974 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1975 	else
1976 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1977 }
1978 
1979 static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1980 				     struct drm_framebuffer *fb,
1981 				     int x, int y, int atomic)
1982 {
1983 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1984 	struct drm_device *dev = crtc->dev;
1985 	struct amdgpu_device *adev = dev->dev_private;
1986 	struct amdgpu_framebuffer *amdgpu_fb;
1987 	struct drm_framebuffer *target_fb;
1988 	struct drm_gem_object *obj;
1989 	struct amdgpu_bo *rbo;
1990 	uint64_t fb_location, tiling_flags;
1991 	uint32_t fb_format, fb_pitch_pixels;
1992 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1993 	u32 pipe_config;
1994 	u32 tmp, viewport_w, viewport_h;
1995 	int r;
1996 	bool bypass_lut = false;
1997 
1998 	/* no fb bound */
1999 	if (!atomic && !crtc->primary->fb) {
2000 		DRM_DEBUG_KMS("No FB bound\n");
2001 		return 0;
2002 	}
2003 
2004 	if (atomic) {
2005 		amdgpu_fb = to_amdgpu_framebuffer(fb);
2006 		target_fb = fb;
2007 	} else {
2009 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2010 		target_fb = crtc->primary->fb;
2011 	}
2012 
2013 	/* If atomic, assume fb object is pinned & idle & fenced and
2014 	 * just update base pointers
2015 	 */
2016 	obj = amdgpu_fb->obj;
2017 	rbo = gem_to_amdgpu_bo(obj);
2018 	r = amdgpu_bo_reserve(rbo, false);
2019 	if (unlikely(r != 0))
2020 		return r;
2021 
2022 	if (atomic)
2023 		fb_location = amdgpu_bo_gpu_offset(rbo);
2024 	else {
2025 		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2026 		if (unlikely(r != 0)) {
2027 			amdgpu_bo_unreserve(rbo);
2028 			return -EINVAL;
2029 		}
2030 	}
2031 
2032 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
2033 	amdgpu_bo_unreserve(rbo);
2034 
2035 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2036 
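	/* translate the DRM pixel format into GRPH_CONTROL depth/format fields,
	 * picking an endian swap mode on big-endian hosts
	 */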
2037 	switch (target_fb->pixel_format) {
2038 	case DRM_FORMAT_C8:
2039 		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2040 			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2041 		break;
2042 	case DRM_FORMAT_XRGB4444:
2043 	case DRM_FORMAT_ARGB4444:
2044 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2045 			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2046 #ifdef __BIG_ENDIAN
2047 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2048 #endif
2049 		break;
2050 	case DRM_FORMAT_XRGB1555:
2051 	case DRM_FORMAT_ARGB1555:
2052 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2053 			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2054 #ifdef __BIG_ENDIAN
2055 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2056 #endif
2057 		break;
2058 	case DRM_FORMAT_BGRX5551:
2059 	case DRM_FORMAT_BGRA5551:
2060 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2061 			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2062 #ifdef __BIG_ENDIAN
2063 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2064 #endif
2065 		break;
2066 	case DRM_FORMAT_RGB565:
2067 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2068 			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2069 #ifdef __BIG_ENDIAN
2070 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2071 #endif
2072 		break;
2073 	case DRM_FORMAT_XRGB8888:
2074 	case DRM_FORMAT_ARGB8888:
2075 		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2076 			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2077 #ifdef __BIG_ENDIAN
2078 		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2079 #endif
2080 		break;
2081 	case DRM_FORMAT_XRGB2101010:
2082 	case DRM_FORMAT_ARGB2101010:
2083 		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2084 			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2085 #ifdef __BIG_ENDIAN
2086 		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2087 #endif
2088 		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2089 		bypass_lut = true;
2090 		break;
2091 	case DRM_FORMAT_BGRX1010102:
2092 	case DRM_FORMAT_BGRA1010102:
2093 		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2094 			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2095 #ifdef __BIG_ENDIAN
2096 		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2097 #endif
2098 		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2099 		bypass_lut = true;
2100 		break;
2101 	default:
2102 		DRM_ERROR("Unsupported screen format %s\n",
2103 			  drm_get_format_name(target_fb->pixel_format));
2104 		return -EINVAL;
2105 	}
2106 
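	/* for tiled surfaces, fold the bank and tile-split geometry from the BO's
	 * tiling flags into GRPH_CONTROL as well
	 */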
2107 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2108 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2109 
2110 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2111 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2112 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2113 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2114 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2115 
2116 		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2117 		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2118 		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2119 		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2120 		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2121 		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2122 		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
2123 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2124 		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2125 	}
2126 
2127 	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2128 
2129 	dce_v8_0_vga_enable(crtc, false);
2130 
2131 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2132 	       upper_32_bits(fb_location));
2133 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2134 	       upper_32_bits(fb_location));
2135 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2136 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2137 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2138 	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2139 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2140 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2141 
2142 	/*
2143 	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
2144 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2145 	 * retain the full precision throughout the pipeline.
2146 	 */
2147 	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
2148 		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
2149 		 ~LUT_10BIT_BYPASS_EN);
2150 
2151 	if (bypass_lut)
2152 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2153 
2154 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2155 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2156 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2157 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2158 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2159 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2160 
2161 	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2162 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2163 
2164 	dce_v8_0_grph_enable(crtc, true);
2165 
2166 	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2167 	       target_fb->height);
2168 
2169 	x &= ~3;
2170 	y &= ~1;
2171 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2172 	       (x << 16) | y);
2173 	viewport_w = crtc->mode.hdisplay;
2174 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2175 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2176 	       (viewport_w << 16) | viewport_h);
2177 
2178 	/* pageflip setup */
2179 	/* make sure flip is at vb rather than hb */
2180 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2181 	tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
2182 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2183 
2184 	/* set pageflip to happen only at start of vblank interval (front porch) */
2185 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
2186 
2187 	if (!atomic && fb && fb != crtc->primary->fb) {
2188 		amdgpu_fb = to_amdgpu_framebuffer(fb);
2189 		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2190 		r = amdgpu_bo_reserve(rbo, false);
2191 		if (unlikely(r != 0))
2192 			return r;
2193 		amdgpu_bo_unpin(rbo);
2194 		amdgpu_bo_unreserve(rbo);
2195 	}
2196 
2197 	/* Bytes per pixel may have changed */
2198 	dce_v8_0_bandwidth_update(adev);
2199 
2200 	return 0;
2201 }
2202 
2203 static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2204 				    struct drm_display_mode *mode)
2205 {
2206 	struct drm_device *dev = crtc->dev;
2207 	struct amdgpu_device *adev = dev->dev_private;
2208 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2209 
2210 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2211 		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2212 		       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2213 	else
2214 		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2215 }
2216 
2217 static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2218 {
2219 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2220 	struct drm_device *dev = crtc->dev;
2221 	struct amdgpu_device *adev = dev->dev_private;
2222 	int i;
2223 
2224 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2225 
2226 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2227 	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2228 		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2229 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2230 	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2231 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2232 	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2233 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2234 	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2235 		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2236 
2237 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2238 
2239 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2240 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2241 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2242 
2243 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2244 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2245 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2246 
2247 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2248 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2249 
2250 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2251 	for (i = 0; i < 256; i++) {
2252 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2253 		       (amdgpu_crtc->lut_r[i] << 20) |
2254 		       (amdgpu_crtc->lut_g[i] << 10) |
2255 		       (amdgpu_crtc->lut_b[i] << 0));
2256 	}
2257 
2258 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2259 	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2260 		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2261 		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2262 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2263 	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2264 		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2265 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2266 	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2267 		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2268 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2269 	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2270 		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2271 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2272 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2273 	/* XXX this only needs to be programmed once per crtc at startup,
2274 	 * not sure where the best place for it is
2275 	 */
2276 	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2277 	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2278 }
2279 
2280 static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2281 {
2282 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2283 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2284 
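	/* each UNIPHY block has two links (A/B), each mapped to its own DIG encoder */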
2285 	switch (amdgpu_encoder->encoder_id) {
2286 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2287 		if (dig->linkb)
2288 			return 1;
2289 		else
2290 			return 0;
2291 		break;
2292 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2293 		if (dig->linkb)
2294 			return 3;
2295 		else
2296 			return 2;
2297 		break;
2298 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2299 		if (dig->linkb)
2300 			return 5;
2301 		else
2302 			return 4;
2303 		break;
2304 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2305 		return 6;
2306 		break;
2307 	default:
2308 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2309 		return 0;
2310 	}
2311 }
2312 
2313 /**
2314  * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2315  *
2316  * @crtc: drm crtc
2317  *
2318  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2319  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2320  * monitors a dedicated PPLL must be used.  If a particular board has
2321  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2322  * as there is no need to program the PLL itself.  If we are not able to
2323  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2324  * avoid messing up an existing monitor.
2325  *
2326  * Asic specific PLL information
2327  *
2328  * DCE 8.x
2329  * KB/KV
2330  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2331  * CI
2332  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2333  *
2334  */
2335 static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2336 {
2337 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2338 	struct drm_device *dev = crtc->dev;
2339 	struct amdgpu_device *adev = dev->dev_private;
2340 	u32 pll_in_use;
2341 	int pll;
2342 
2343 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2344 		if (adev->clock.dp_extclk)
2345 			/* skip PPLL programming if using ext clock */
2346 			return ATOM_PPLL_INVALID;
2347 		else {
2348 			/* use the same PPLL for all DP monitors */
2349 			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2350 			if (pll != ATOM_PPLL_INVALID)
2351 				return pll;
2352 		}
2353 	} else {
2354 		/* use the same PPLL for all monitors with the same clock */
2355 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2356 		if (pll != ATOM_PPLL_INVALID)
2357 			return pll;
2358 	}
2359 	/* otherwise, pick one of the plls */
2360 	if ((adev->asic_type == CHIP_KABINI) ||
2361 	    (adev->asic_type == CHIP_MULLINS)) {
2362 		/* KB/ML has PPLL1 and PPLL2 */
2363 		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2364 		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2365 			return ATOM_PPLL2;
2366 		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2367 			return ATOM_PPLL1;
2368 		DRM_ERROR("unable to allocate a PPLL\n");
2369 		return ATOM_PPLL_INVALID;
2370 	} else {
2371 		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
2372 		pll_in_use = amdgpu_pll_get_use_mask(crtc);
2373 		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2374 			return ATOM_PPLL2;
2375 		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2376 			return ATOM_PPLL1;
2377 		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2378 			return ATOM_PPLL0;
2379 		DRM_ERROR("unable to allocate a PPLL\n");
2380 		return ATOM_PPLL_INVALID;
2381 	}
2382 	return ATOM_PPLL_INVALID;
2383 }
2384 
2385 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2386 {
2387 	struct amdgpu_device *adev = crtc->dev->dev_private;
2388 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2389 	uint32_t cur_lock;
2390 
2391 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2392 	if (lock)
2393 		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2394 	else
2395 		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2396 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2397 }
2398 
2399 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2400 {
2401 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2402 	struct amdgpu_device *adev = crtc->dev->dev_private;
2403 
2404 	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2405 		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2406 		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2407 }
2408 
2409 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2410 {
2411 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2412 	struct amdgpu_device *adev = crtc->dev->dev_private;
2413 
2414 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2415 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2416 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2417 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2418 
2419 	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2420 		   CUR_CONTROL__CURSOR_EN_MASK |
2421 		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2422 		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2423 }
2424 
2425 static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2426 				       int x, int y)
2427 {
2428 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2429 	struct amdgpu_device *adev = crtc->dev->dev_private;
2430 	int xorigin = 0, yorigin = 0;
2431 
2432 	/* avivo cursors are offset into the total surface */
2433 	x += crtc->x;
2434 	y += crtc->y;
2435 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2436 
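	/* the position registers can't take negative values; clamp to 0 and move the hotspot instead */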
2437 	if (x < 0) {
2438 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2439 		x = 0;
2440 	}
2441 	if (y < 0) {
2442 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2443 		y = 0;
2444 	}
2445 
2446 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2447 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2448 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2449 	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2450 
2451 	amdgpu_crtc->cursor_x = x;
2452 	amdgpu_crtc->cursor_y = y;
2453 
2454 	return 0;
2455 }
2456 
2457 static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2458 				     int x, int y)
2459 {
2460 	int ret;
2461 
2462 	dce_v8_0_lock_cursor(crtc, true);
2463 	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2464 	dce_v8_0_lock_cursor(crtc, false);
2465 
2466 	return ret;
2467 }
2468 
2469 static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2470 				     struct drm_file *file_priv,
2471 				     uint32_t handle,
2472 				     uint32_t width,
2473 				     uint32_t height,
2474 				     int32_t hot_x,
2475 				     int32_t hot_y)
2476 {
2477 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2478 	struct drm_gem_object *obj;
2479 	struct amdgpu_bo *aobj;
2480 	int ret;
2481 
2482 	if (!handle) {
2483 		/* turn off cursor */
2484 		dce_v8_0_hide_cursor(crtc);
2485 		obj = NULL;
2486 		goto unpin;
2487 	}
2488 
2489 	if ((width > amdgpu_crtc->max_cursor_width) ||
2490 	    (height > amdgpu_crtc->max_cursor_height)) {
2491 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2492 		return -EINVAL;
2493 	}
2494 
2495 	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
2496 	if (!obj) {
2497 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2498 		return -ENOENT;
2499 	}
2500 
2501 	aobj = gem_to_amdgpu_bo(obj);
2502 	ret = amdgpu_bo_reserve(aobj, false);
2503 	if (ret != 0) {
2504 		drm_gem_object_unreference_unlocked(obj);
2505 		return ret;
2506 	}
2507 
2508 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2509 	amdgpu_bo_unreserve(aobj);
2510 	if (ret) {
2511 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2512 		drm_gem_object_unreference_unlocked(obj);
2513 		return ret;
2514 	}
2515 
2516 	amdgpu_crtc->cursor_width = width;
2517 	amdgpu_crtc->cursor_height = height;
2518 
2519 	dce_v8_0_lock_cursor(crtc, true);
2520 
2521 	if (hot_x != amdgpu_crtc->cursor_hot_x ||
2522 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2523 		int x, y;
2524 
2525 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2526 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2527 
2528 		dce_v8_0_cursor_move_locked(crtc, x, y);
2529 
2530 		amdgpu_crtc->cursor_hot_x = hot_x;
2531 		amdgpu_crtc->cursor_hot_y = hot_y;
2532 	}
2533 
2534 	dce_v8_0_show_cursor(crtc);
2535 	dce_v8_0_lock_cursor(crtc, false);
2536 
2537 unpin:
2538 	if (amdgpu_crtc->cursor_bo) {
2539 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2540 		ret = amdgpu_bo_reserve(aobj, false);
2541 		if (likely(ret == 0)) {
2542 			amdgpu_bo_unpin(aobj);
2543 			amdgpu_bo_unreserve(aobj);
2544 		}
2545 		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2546 	}
2547 
2548 	amdgpu_crtc->cursor_bo = obj;
2549 	return 0;
2550 }
2551 
2552 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2553 {
2554 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2555 
2556 	if (amdgpu_crtc->cursor_bo) {
2557 		dce_v8_0_lock_cursor(crtc, true);
2558 
2559 		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2560 					    amdgpu_crtc->cursor_y);
2561 
2562 		dce_v8_0_show_cursor(crtc);
2563 
2564 		dce_v8_0_lock_cursor(crtc, false);
2565 	}
2566 }
2567 
2568 static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2569 				    u16 *blue, uint32_t start, uint32_t size)
2570 {
2571 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2572 	int end = (start + size > 256) ? 256 : start + size, i;
2573 
2574 	/* userspace palettes are always correct as is */
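	/* the hardware LUT holds 10 bits per component, so drop the low 6 bits of the 16-bit ramp */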
2575 	for (i = start; i < end; i++) {
2576 		amdgpu_crtc->lut_r[i] = red[i] >> 6;
2577 		amdgpu_crtc->lut_g[i] = green[i] >> 6;
2578 		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2579 	}
2580 	dce_v8_0_crtc_load_lut(crtc);
2581 }
2582 
2583 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2584 {
2585 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2586 
2587 	drm_crtc_cleanup(crtc);
2588 	destroy_workqueue(amdgpu_crtc->pflip_queue);
2589 	kfree(amdgpu_crtc);
2590 }
2591 
2592 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2593 	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
2594 	.cursor_move = dce_v8_0_crtc_cursor_move,
2595 	.gamma_set = dce_v8_0_crtc_gamma_set,
2596 	.set_config = amdgpu_crtc_set_config,
2597 	.destroy = dce_v8_0_crtc_destroy,
2598 	.page_flip = amdgpu_crtc_page_flip,
2599 };
2600 
2601 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2602 {
2603 	struct drm_device *dev = crtc->dev;
2604 	struct amdgpu_device *adev = dev->dev_private;
2605 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2606 	unsigned type;
2607 
2608 	switch (mode) {
2609 	case DRM_MODE_DPMS_ON:
2610 		amdgpu_crtc->enabled = true;
2611 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2612 		dce_v8_0_vga_enable(crtc, true);
2613 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2614 		dce_v8_0_vga_enable(crtc, false);
2615 		/* Make sure VBLANK interrupt is still enabled */
2616 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2617 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2618 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2619 		dce_v8_0_crtc_load_lut(crtc);
2620 		break;
2621 	case DRM_MODE_DPMS_STANDBY:
2622 	case DRM_MODE_DPMS_SUSPEND:
2623 	case DRM_MODE_DPMS_OFF:
2624 		drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
2625 		if (amdgpu_crtc->enabled) {
2626 			dce_v8_0_vga_enable(crtc, true);
2627 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2628 			dce_v8_0_vga_enable(crtc, false);
2629 		}
2630 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2631 		amdgpu_crtc->enabled = false;
2632 		break;
2633 	}
2634 	/* adjust pm to dpms */
2635 	amdgpu_pm_compute_clocks(adev);
2636 }
2637 
2638 static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2639 {
2640 	/* disable crtc pair power gating before programming */
2641 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2642 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2643 	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2644 }
2645 
2646 static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2647 {
2648 	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2649 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2650 }
2651 
2652 static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2653 {
2654 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2655 	struct drm_device *dev = crtc->dev;
2656 	struct amdgpu_device *adev = dev->dev_private;
2657 	struct amdgpu_atom_ss ss;
2658 	int i;
2659 
2660 	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2661 	if (crtc->primary->fb) {
2662 		int r;
2663 		struct amdgpu_framebuffer *amdgpu_fb;
2664 		struct amdgpu_bo *rbo;
2665 
2666 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2667 		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2668 		r = amdgpu_bo_reserve(rbo, false);
2669 		if (unlikely(r))
2670 			DRM_ERROR("failed to reserve rbo before unpin\n");
2671 		else {
2672 			amdgpu_bo_unpin(rbo);
2673 			amdgpu_bo_unreserve(rbo);
2674 		}
2675 	}
2676 	/* disable the GRPH */
2677 	dce_v8_0_grph_enable(crtc, false);
2678 
2679 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2680 
2681 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2682 		if (adev->mode_info.crtcs[i] &&
2683 		    adev->mode_info.crtcs[i]->enabled &&
2684 		    i != amdgpu_crtc->crtc_id &&
2685 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2686 			/* one other crtc is using this pll, don't turn
2687 			 * off the pll
2688 			 */
2689 			goto done;
2690 		}
2691 	}
2692 
2693 	switch (amdgpu_crtc->pll_id) {
2694 	case ATOM_PPLL1:
2695 	case ATOM_PPLL2:
2696 		/* disable the ppll */
2697 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2698 					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2699 		break;
2700 	case ATOM_PPLL0:
2701 		/* disable the ppll */
2702 		if ((adev->asic_type == CHIP_KAVERI) ||
2703 		    (adev->asic_type == CHIP_BONAIRE) ||
2704 		    (adev->asic_type == CHIP_HAWAII))
2705 			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2706 						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2707 		break;
2708 	default:
2709 		break;
2710 	}
2711 done:
2712 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2713 	amdgpu_crtc->adjusted_clock = 0;
2714 	amdgpu_crtc->encoder = NULL;
2715 	amdgpu_crtc->connector = NULL;
2716 }
2717 
2718 static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2719 				  struct drm_display_mode *mode,
2720 				  struct drm_display_mode *adjusted_mode,
2721 				  int x, int y, struct drm_framebuffer *old_fb)
2722 {
2723 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2724 
2725 	if (!amdgpu_crtc->adjusted_clock)
2726 		return -EINVAL;
2727 
2728 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2729 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2730 	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2731 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2732 	amdgpu_atombios_crtc_scaler_setup(crtc);
2733 	dce_v8_0_cursor_reset(crtc);
2734 	/* update the hw mode for dpm */
2735 	amdgpu_crtc->hw_mode = *adjusted_mode;
2736 
2737 	return 0;
2738 }
2739 
2740 static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2741 				     const struct drm_display_mode *mode,
2742 				     struct drm_display_mode *adjusted_mode)
2743 {
2744 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2745 	struct drm_device *dev = crtc->dev;
2746 	struct drm_encoder *encoder;
2747 
2748 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2749 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2750 		if (encoder->crtc == crtc) {
2751 			amdgpu_crtc->encoder = encoder;
2752 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2753 			break;
2754 		}
2755 	}
2756 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2757 		amdgpu_crtc->encoder = NULL;
2758 		amdgpu_crtc->connector = NULL;
2759 		return false;
2760 	}
2761 	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2762 		return false;
2763 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2764 		return false;
2765 	/* pick pll */
2766 	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2767 	/* if we can't get a PPLL for a non-DP encoder, fail */
2768 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2769 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2770 		return false;
2771 
2772 	return true;
2773 }
2774 
2775 static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2776 				  struct drm_framebuffer *old_fb)
2777 {
2778 	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2779 }
2780 
2781 static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2782 					 struct drm_framebuffer *fb,
2783 					 int x, int y, enum mode_set_atomic state)
2784 {
2785 	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2786 }
2787 
2788 static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2789 	.dpms = dce_v8_0_crtc_dpms,
2790 	.mode_fixup = dce_v8_0_crtc_mode_fixup,
2791 	.mode_set = dce_v8_0_crtc_mode_set,
2792 	.mode_set_base = dce_v8_0_crtc_set_base,
2793 	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2794 	.prepare = dce_v8_0_crtc_prepare,
2795 	.commit = dce_v8_0_crtc_commit,
2796 	.load_lut = dce_v8_0_crtc_load_lut,
2797 	.disable = dce_v8_0_crtc_disable,
2798 };
2799 
2800 static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2801 {
2802 	struct amdgpu_crtc *amdgpu_crtc;
2803 	int i;
2804 
2805 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2806 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2807 	if (amdgpu_crtc == NULL)
2808 		return -ENOMEM;
2809 
2810 	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2811 
2812 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2813 	amdgpu_crtc->crtc_id = index;
2814 	amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
2815 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2816 
2817 	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2818 	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2819 	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2820 	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2821 
2822 	for (i = 0; i < 256; i++) {
2823 		amdgpu_crtc->lut_r[i] = i << 2;
2824 		amdgpu_crtc->lut_g[i] = i << 2;
2825 		amdgpu_crtc->lut_b[i] = i << 2;
2826 	}
2827 
2828 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2829 
2830 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2831 	amdgpu_crtc->adjusted_clock = 0;
2832 	amdgpu_crtc->encoder = NULL;
2833 	amdgpu_crtc->connector = NULL;
2834 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2835 
2836 	return 0;
2837 }
2838 
2839 static int dce_v8_0_early_init(void *handle)
2840 {
2841 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2842 
2843 	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2844 	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2845 
2846 	dce_v8_0_set_display_funcs(adev);
2847 	dce_v8_0_set_irq_funcs(adev);
2848 
2849 	switch (adev->asic_type) {
2850 	case CHIP_BONAIRE:
2851 	case CHIP_HAWAII:
2852 		adev->mode_info.num_crtc = 6;
2853 		adev->mode_info.num_hpd = 6;
2854 		adev->mode_info.num_dig = 6;
2855 		break;
2856 	case CHIP_KAVERI:
2857 		adev->mode_info.num_crtc = 4;
2858 		adev->mode_info.num_hpd = 6;
2859 		adev->mode_info.num_dig = 7;
2860 		break;
2861 	case CHIP_KABINI:
2862 	case CHIP_MULLINS:
2863 		adev->mode_info.num_crtc = 2;
2864 		adev->mode_info.num_hpd = 6;
2865 		adev->mode_info.num_dig = 6; /* ? */
2866 		break;
2867 	default:
2868 		/* FIXME: not supported yet */
2869 		return -EINVAL;
2870 	}
2871 
2872 	return 0;
2873 }
2874 
2875 static int dce_v8_0_sw_init(void *handle)
2876 {
2877 	int r, i;
2878 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2879 
2880 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2881 		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2882 		if (r)
2883 			return r;
2884 	}
2885 
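	/* pageflip interrupts: one IH source id per crtc, spaced two apart starting at 8 */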
2886 	for (i = 8; i < 20; i += 2) {
2887 		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2888 		if (r)
2889 			return r;
2890 	}
2891 
2892 	/* HPD hotplug */
2893 	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2894 	if (r)
2895 		return r;
2896 
2897 	adev->mode_info.mode_config_initialized = true;
2898 
2899 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2900 
2901 	adev->ddev->mode_config.max_width = 16384;
2902 	adev->ddev->mode_config.max_height = 16384;
2903 
2904 	adev->ddev->mode_config.preferred_depth = 24;
2905 	adev->ddev->mode_config.prefer_shadow = 1;
2906 
2907 	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2908 
2909 	r = amdgpu_modeset_create_props(adev);
2910 	if (r)
2911 		return r;
2912 
2913 	adev->ddev->mode_config.max_width = 16384;
2914 	adev->ddev->mode_config.max_height = 16384;
2915 
2916 	/* allocate crtcs */
2917 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2918 		r = dce_v8_0_crtc_init(adev, i);
2919 		if (r)
2920 			return r;
2921 	}
2922 
2923 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2924 		amdgpu_print_display_setup(adev->ddev);
2925 	else
2926 		return -EINVAL;
2927 
2928 	/* setup afmt */
2929 	dce_v8_0_afmt_init(adev);
2930 
2931 	r = dce_v8_0_audio_init(adev);
2932 	if (r)
2933 		return r;
2934 
2935 	drm_kms_helper_poll_init(adev->ddev);
2936 
2937 	return r;
2938 }
2939 
2940 static int dce_v8_0_sw_fini(void *handle)
2941 {
2942 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2943 
2944 	kfree(adev->mode_info.bios_hardcoded_edid);
2945 
2946 	drm_kms_helper_poll_fini(adev->ddev);
2947 
2948 	dce_v8_0_audio_fini(adev);
2949 
2950 	dce_v8_0_afmt_fini(adev);
2951 
2952 	drm_mode_config_cleanup(adev->ddev);
2953 	adev->mode_info.mode_config_initialized = false;
2954 
2955 	return 0;
2956 }
2957 
2958 static int dce_v8_0_hw_init(void *handle)
2959 {
2960 	int i;
2961 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2962 
2963 	/* init dig PHYs, disp eng pll */
2964 	amdgpu_atombios_encoder_init_dig(adev);
2965 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2966 
2967 	/* initialize hpd */
2968 	dce_v8_0_hpd_init(adev);
2969 
2970 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2971 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2972 	}
2973 
2974 	return 0;
2975 }
2976 
2977 static int dce_v8_0_hw_fini(void *handle)
2978 {
2979 	int i;
2980 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2981 
2982 	dce_v8_0_hpd_fini(adev);
2983 
2984 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2985 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2986 	}
2987 
2988 	return 0;
2989 }
2990 
2991 static int dce_v8_0_suspend(void *handle)
2992 {
2993 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2994 
2995 	amdgpu_atombios_scratch_regs_save(adev);
2996 
2997 	return dce_v8_0_hw_fini(handle);
2998 }
2999 
3000 static int dce_v8_0_resume(void *handle)
3001 {
3002 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3003 	int ret;
3004 
3005 	ret = dce_v8_0_hw_init(handle);
3006 
3007 	amdgpu_atombios_scratch_regs_restore(adev);
3008 
3009 	/* turn on the BL */
3010 	if (adev->mode_info.bl_encoder) {
3011 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
3012 								  adev->mode_info.bl_encoder);
3013 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3014 						    bl_level);
3015 	}
3016 
3017 	return ret;
3018 }
3019 
3020 static bool dce_v8_0_is_idle(void *handle)
3021 {
3022 	return true;
3023 }
3024 
3025 static int dce_v8_0_wait_for_idle(void *handle)
3026 {
3027 	return 0;
3028 }
3029 
3030 static void dce_v8_0_print_status(void *handle)
3031 {
3032 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3033 
3034 	dev_info(adev->dev, "DCE 8.x registers\n");
3035 	/* XXX todo */
3036 }
3037 
3038 static int dce_v8_0_soft_reset(void *handle)
3039 {
3040 	u32 srbm_soft_reset = 0, tmp;
3041 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3042 
3043 	if (dce_v8_0_is_display_hung(adev))
3044 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
3045 
3046 	if (srbm_soft_reset) {
3047 		dce_v8_0_print_status((void *)adev);
3048 
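		/* pulse the DC soft reset: assert, wait ~50us, deassert, then let things settle */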
3049 		tmp = RREG32(mmSRBM_SOFT_RESET);
3050 		tmp |= srbm_soft_reset;
3051 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3052 		WREG32(mmSRBM_SOFT_RESET, tmp);
3053 		tmp = RREG32(mmSRBM_SOFT_RESET);
3054 
3055 		udelay(50);
3056 
3057 		tmp &= ~srbm_soft_reset;
3058 		WREG32(mmSRBM_SOFT_RESET, tmp);
3059 		tmp = RREG32(mmSRBM_SOFT_RESET);
3060 
3061 		/* Wait a little for things to settle down */
3062 		udelay(50);
3063 		dce_v8_0_print_status((void *)adev);
3064 	}
3065 	return 0;
3066 }
3067 
3068 static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3069 						     int crtc,
3070 						     enum amdgpu_interrupt_state state)
3071 {
3072 	u32 reg_block, lb_interrupt_mask;
3073 
3074 	if (crtc >= adev->mode_info.num_crtc) {
3075 		DRM_DEBUG("invalid crtc %d\n", crtc);
3076 		return;
3077 	}
3078 
3079 	switch (crtc) {
3080 	case 0:
3081 		reg_block = CRTC0_REGISTER_OFFSET;
3082 		break;
3083 	case 1:
3084 		reg_block = CRTC1_REGISTER_OFFSET;
3085 		break;
3086 	case 2:
3087 		reg_block = CRTC2_REGISTER_OFFSET;
3088 		break;
3089 	case 3:
3090 		reg_block = CRTC3_REGISTER_OFFSET;
3091 		break;
3092 	case 4:
3093 		reg_block = CRTC4_REGISTER_OFFSET;
3094 		break;
3095 	case 5:
3096 		reg_block = CRTC5_REGISTER_OFFSET;
3097 		break;
3098 	default:
3099 		DRM_DEBUG("invalid crtc %d\n", crtc);
3100 		return;
3101 	}
3102 
3103 	switch (state) {
3104 	case AMDGPU_IRQ_STATE_DISABLE:
3105 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3106 		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3107 		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3108 		break;
3109 	case AMDGPU_IRQ_STATE_ENABLE:
3110 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3111 		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3112 		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3113 		break;
3114 	default:
3115 		break;
3116 	}
3117 }
3118 
3119 static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3120 						    int crtc,
3121 						    enum amdgpu_interrupt_state state)
3122 {
3123 	u32 reg_block, lb_interrupt_mask;
3124 
3125 	if (crtc >= adev->mode_info.num_crtc) {
3126 		DRM_DEBUG("invalid crtc %d\n", crtc);
3127 		return;
3128 	}
3129 
3130 	switch (crtc) {
3131 	case 0:
3132 		reg_block = CRTC0_REGISTER_OFFSET;
3133 		break;
3134 	case 1:
3135 		reg_block = CRTC1_REGISTER_OFFSET;
3136 		break;
3137 	case 2:
3138 		reg_block = CRTC2_REGISTER_OFFSET;
3139 		break;
3140 	case 3:
3141 		reg_block = CRTC3_REGISTER_OFFSET;
3142 		break;
3143 	case 4:
3144 		reg_block = CRTC4_REGISTER_OFFSET;
3145 		break;
3146 	case 5:
3147 		reg_block = CRTC5_REGISTER_OFFSET;
3148 		break;
3149 	default:
3150 		DRM_DEBUG("invalid crtc %d\n", crtc);
3151 		return;
3152 	}
3153 
3154 	switch (state) {
3155 	case AMDGPU_IRQ_STATE_DISABLE:
3156 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3157 		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3158 		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3159 		break;
3160 	case AMDGPU_IRQ_STATE_ENABLE:
3161 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3162 		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3163 		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3164 		break;
3165 	default:
3166 		break;
3167 	}
3168 }
3169 
3170 static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
3171 					    struct amdgpu_irq_src *src,
3172 					    unsigned type,
3173 					    enum amdgpu_interrupt_state state)
3174 {
3175 	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
3176 
3177 	switch (type) {
3178 	case AMDGPU_HPD_1:
3179 		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
3180 		break;
3181 	case AMDGPU_HPD_2:
3182 		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
3183 		break;
3184 	case AMDGPU_HPD_3:
3185 		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
3186 		break;
3187 	case AMDGPU_HPD_4:
3188 		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
3189 		break;
3190 	case AMDGPU_HPD_5:
3191 		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
3192 		break;
3193 	case AMDGPU_HPD_6:
3194 		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
3195 		break;
3196 	default:
3197 		DRM_DEBUG("invalid hpd %d\n", type);
3198 		return 0;
3199 	}
3200 
3201 	switch (state) {
3202 	case AMDGPU_IRQ_STATE_DISABLE:
3203 		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
3204 		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3205 		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
3206 		break;
3207 	case AMDGPU_IRQ_STATE_ENABLE:
3208 		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
3209 		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3210 		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
3211 		break;
3212 	default:
3213 		break;
3214 	}
3215 
3216 	return 0;
3217 }
3218 
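/**
 * dce_v8_0_set_crtc_interrupt_state - dispatch crtc interrupt state changes
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: crtc interrupt type (per-crtc vblank or vline)
 * @state: interrupt state (enable or disable)
 *
 * Routes vblank and vline interrupt type requests to the per-crtc
 * helpers above; unknown types are ignored.  Returns 0.
 */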
3219 static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3220 					     struct amdgpu_irq_src *src,
3221 					     unsigned type,
3222 					     enum amdgpu_interrupt_state state)
3223 {
3224 	switch (type) {
3225 	case AMDGPU_CRTC_IRQ_VBLANK1:
3226 		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3227 		break;
3228 	case AMDGPU_CRTC_IRQ_VBLANK2:
3229 		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3230 		break;
3231 	case AMDGPU_CRTC_IRQ_VBLANK3:
3232 		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3233 		break;
3234 	case AMDGPU_CRTC_IRQ_VBLANK4:
3235 		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3236 		break;
3237 	case AMDGPU_CRTC_IRQ_VBLANK5:
3238 		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3239 		break;
3240 	case AMDGPU_CRTC_IRQ_VBLANK6:
3241 		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3242 		break;
3243 	case AMDGPU_CRTC_IRQ_VLINE1:
3244 		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3245 		break;
3246 	case AMDGPU_CRTC_IRQ_VLINE2:
3247 		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3248 		break;
3249 	case AMDGPU_CRTC_IRQ_VLINE3:
3250 		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3251 		break;
3252 	case AMDGPU_CRTC_IRQ_VLINE4:
3253 		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3254 		break;
3255 	case AMDGPU_CRTC_IRQ_VLINE5:
3256 		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3257 		break;
3258 	case AMDGPU_CRTC_IRQ_VLINE6:
3259 		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3260 		break;
3261 	default:
3262 		break;
3263 	}
3264 	return 0;
3265 }
3266 
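/**
 * dce_v8_0_crtc_irq - crtc interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Handles vblank and vline interrupts: acks the event in the crtc's
 * LB_VBLANK_STATUS/LB_VLINE_STATUS register and, for vblank, forwards
 * the event to the DRM core via drm_handle_vblank().  Returns 0.
 */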
3267 static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3268 			     struct amdgpu_irq_src *source,
3269 			     struct amdgpu_iv_entry *entry)
3270 {
3271 	unsigned crtc = entry->src_id - 1;
3272 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3273 	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3274 
3275 	switch (entry->src_data) {
3276 	case 0: /* vblank */
3277 		if (disp_int & interrupt_status_offsets[crtc].vblank)
3278 			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3279 		else
3280 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3281 
3282 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3283 			drm_handle_vblank(adev->ddev, crtc);
3284 		}
3285 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3286 
3287 		break;
3288 	case 1: /* vline */
3289 		if (disp_int & interrupt_status_offsets[crtc].vline)
3290 			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3291 		else
3292 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3293 
3294 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3295 
3296 		break;
3297 	default:
3298 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3299 		break;
3300 	}
3301 
3302 	return 0;
3303 }
3304 
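/**
 * dce_v8_0_set_pageflip_interrupt_state - toggle pageflip interrupts for a crtc
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: crtc index
 * @state: interrupt state (enable or disable)
 *
 * Enables or disables the pageflip completion interrupt by updating the
 * GRPH_PFLIP_INT_MASK bit in the crtc's GRPH_INTERRUPT_CONTROL register.
 * Returns 0 on success, -EINVAL for an out of range crtc.
 */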
3305 static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3306 						 struct amdgpu_irq_src *src,
3307 						 unsigned type,
3308 						 enum amdgpu_interrupt_state state)
3309 {
3310 	u32 reg;
3311 
3312 	if (type >= adev->mode_info.num_crtc) {
3313 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3314 		return -EINVAL;
3315 	}
3316 
3317 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3318 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3319 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3320 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3321 	else
3322 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3323 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3324 
3325 	return 0;
3326 }
3327 
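/**
 * dce_v8_0_pageflip_irq - pageflip interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Acks the pageflip interrupt, marks the pending flip as completed,
 * sends the vblank event to userspace, drops the vblank and irq
 * references and queues the unpin work.  Returns 0.
 */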
3328 static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3329 				struct amdgpu_irq_src *source,
3330 				struct amdgpu_iv_entry *entry)
3331 {
3332 	unsigned long flags;
3333 	unsigned crtc_id;
3334 	struct amdgpu_crtc *amdgpu_crtc;
3335 	struct amdgpu_flip_work *works;
3336 
3337 	crtc_id = (entry->src_id - 8) >> 1;
3338 	if (crtc_id >= adev->mode_info.num_crtc) {
3339 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3340 		return -EINVAL;
3341 	}
3342 
3343 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3344 
3345 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3346 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3347 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3348 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3349 
3350 	/* IRQ can fire during initial bring-up, before the crtc is set up */
3351 	if (amdgpu_crtc == NULL)
3352 		return 0;
3353 
3354 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3355 	works = amdgpu_crtc->pflip_works;
3356 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3357 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3358 						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3359 						amdgpu_crtc->pflip_status,
3360 						AMDGPU_FLIP_SUBMITTED);
3361 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3362 		return 0;
3363 	}
3364 
3365 	/* page flip completed. clean up */
3366 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3367 	amdgpu_crtc->pflip_works = NULL;
3368 
3369 	/* wake up userspace */
3370 	if (works->event)
3371 		drm_send_vblank_event(adev->ddev, crtc_id, works->event);
3372 
3373 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3374 
3375 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3376 	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
3377 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
3378 
3379 	return 0;
3380 }
3381 
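/**
 * dce_v8_0_hpd_irq - hpd interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Acks the hotplug interrupt on the pad that fired and schedules the
 * hotplug work to re-probe the connectors.  Returns 0.
 */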
3382 static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3383 			    struct amdgpu_irq_src *source,
3384 			    struct amdgpu_iv_entry *entry)
3385 {
3386 	uint32_t disp_int, mask, int_control, tmp;
3387 	unsigned hpd;
3388 
3389 	if (entry->src_data >= adev->mode_info.num_hpd) {
3390 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3391 		return 0;
3392 	}
3393 
3394 	hpd = entry->src_data;
3395 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3396 	mask = interrupt_status_offsets[hpd].hpd;
3397 	int_control = hpd_int_control_offsets[hpd];
3398 
3399 	if (disp_int & mask) {
3400 		tmp = RREG32(int_control);
3401 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3402 		WREG32(int_control, tmp);
3403 		schedule_work(&adev->hotplug_work);
3404 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3405 	}
3406 
3407 	return 0;
3408 
3409 }
3410 
3411 static int dce_v8_0_set_clockgating_state(void *handle,
3412 					  enum amd_clockgating_state state)
3413 {
3414 	return 0;
3415 }
3416 
3417 static int dce_v8_0_set_powergating_state(void *handle,
3418 					  enum amd_powergating_state state)
3419 {
3420 	return 0;
3421 }
3422 
3423 const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3424 	.early_init = dce_v8_0_early_init,
3425 	.late_init = NULL,
3426 	.sw_init = dce_v8_0_sw_init,
3427 	.sw_fini = dce_v8_0_sw_fini,
3428 	.hw_init = dce_v8_0_hw_init,
3429 	.hw_fini = dce_v8_0_hw_fini,
3430 	.suspend = dce_v8_0_suspend,
3431 	.resume = dce_v8_0_resume,
3432 	.is_idle = dce_v8_0_is_idle,
3433 	.wait_for_idle = dce_v8_0_wait_for_idle,
3434 	.soft_reset = dce_v8_0_soft_reset,
3435 	.print_status = dce_v8_0_print_status,
3436 	.set_clockgating_state = dce_v8_0_set_clockgating_state,
3437 	.set_powergating_state = dce_v8_0_set_powergating_state,
3438 };
3439 
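/**
 * dce_v8_0_encoder_mode_set - encoder mode set callback
 *
 * @encoder: drm encoder
 * @mode: requested mode
 * @adjusted_mode: adjusted mode after fixup
 *
 * Records the pixel clock, turns the encoder off while the crtc is being
 * reprogrammed, restores the interleave setting and, for HDMI, enables
 * and programs the AFMT block for the new mode.
 */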
3440 static void
3441 dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3442 			  struct drm_display_mode *mode,
3443 			  struct drm_display_mode *adjusted_mode)
3444 {
3445 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3446 
3447 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3448 
3449 	/* need to call this here rather than in prepare() since we need some crtc info */
3450 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3451 
3452 	/* setting the scaler clears this on some chips */
3453 	dce_v8_0_set_interleave(encoder->crtc, mode);
3454 
3455 	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3456 		dce_v8_0_afmt_enable(encoder, true);
3457 		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3458 	}
3459 }
3460 
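/**
 * dce_v8_0_encoder_prepare - encoder prepare callback
 *
 * @encoder: drm encoder
 *
 * Picks a DIG encoder (and AFMT block) for digital outputs, locks the
 * atombios scratch registers, selects the router port, powers up eDP
 * panels when needed, sets the crtc source and programs the FMT block
 * ahead of the actual mode set.
 */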
3461 static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3462 {
3463 	struct amdgpu_device *adev = encoder->dev->dev_private;
3464 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3465 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3466 
3467 	if ((amdgpu_encoder->active_device &
3468 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3469 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3470 	     ENCODER_OBJECT_ID_NONE)) {
3471 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3472 		if (dig) {
3473 			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3474 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3475 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3476 		}
3477 	}
3478 
3479 	amdgpu_atombios_scratch_regs_lock(adev, true);
3480 
3481 	if (connector) {
3482 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3483 
3484 		/* select the clock/data port if it uses a router */
3485 		if (amdgpu_connector->router.cd_valid)
3486 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3487 
3488 		/* turn eDP panel on for mode set */
3489 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3490 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3491 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3492 	}
3493 
3494 	/* this is needed for the pll/ss setup to work correctly in some cases */
3495 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3496 	/* set up the FMT blocks */
3497 	dce_v8_0_program_fmt(encoder);
3498 }
3499 
3500 static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3501 {
3502 	struct drm_device *dev = encoder->dev;
3503 	struct amdgpu_device *adev = dev->dev_private;
3504 
3505 	/* need to call this here as we need the crtc set up */
3506 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3507 	amdgpu_atombios_scratch_regs_lock(adev, false);
3508 }
3509 
3510 static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3511 {
3512 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3513 	struct amdgpu_encoder_atom_dig *dig;
3514 
3515 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3516 
3517 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3518 		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3519 			dce_v8_0_afmt_enable(encoder, false);
3520 		dig = amdgpu_encoder->enc_priv;
3521 		dig->dig_encoder = -1;
3522 	}
3523 	amdgpu_encoder->active_device = 0;
3524 }
3525 
3526 /* these are handled by the primary encoders */
3527 static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3528 {
3529 
3530 }
3531 
3532 static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3533 {
3534 
3535 }
3536 
3537 static void
3538 dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3539 		      struct drm_display_mode *mode,
3540 		      struct drm_display_mode *adjusted_mode)
3541 {
3542 
3543 }
3544 
3545 static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3546 {
3547 
3548 }
3549 
3550 static void
3551 dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3552 {
3553 
3554 }
3555 
3556 static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder,
3557 				    const struct drm_display_mode *mode,
3558 				    struct drm_display_mode *adjusted_mode)
3559 {
3560 	return true;
3561 }
3562 
3563 static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3564 	.dpms = dce_v8_0_ext_dpms,
3565 	.mode_fixup = dce_v8_0_ext_mode_fixup,
3566 	.prepare = dce_v8_0_ext_prepare,
3567 	.mode_set = dce_v8_0_ext_mode_set,
3568 	.commit = dce_v8_0_ext_commit,
3569 	.disable = dce_v8_0_ext_disable,
3570 	/* no detect for TMDS/LVDS yet */
3571 };
3572 
3573 static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3574 	.dpms = amdgpu_atombios_encoder_dpms,
3575 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3576 	.prepare = dce_v8_0_encoder_prepare,
3577 	.mode_set = dce_v8_0_encoder_mode_set,
3578 	.commit = dce_v8_0_encoder_commit,
3579 	.disable = dce_v8_0_encoder_disable,
3580 	.detect = amdgpu_atombios_encoder_dig_detect,
3581 };
3582 
3583 static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3584 	.dpms = amdgpu_atombios_encoder_dpms,
3585 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3586 	.prepare = dce_v8_0_encoder_prepare,
3587 	.mode_set = dce_v8_0_encoder_mode_set,
3588 	.commit = dce_v8_0_encoder_commit,
3589 	.detect = amdgpu_atombios_encoder_dac_detect,
3590 };
3591 
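/**
 * dce_v8_0_encoder_destroy - clean up and free an encoder
 *
 * @encoder: drm encoder
 *
 * Tears down the backlight for LCD encoders, unregisters the encoder
 * from the DRM core and frees the driver private data.
 */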
3592 static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3593 {
3594 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3595 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3596 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3597 	kfree(amdgpu_encoder->enc_priv);
3598 	drm_encoder_cleanup(encoder);
3599 	kfree(amdgpu_encoder);
3600 }
3601 
3602 static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3603 	.destroy = dce_v8_0_encoder_destroy,
3604 };
3605 
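/**
 * dce_v8_0_encoder_add - register an encoder parsed from the bios
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the bios object table
 * @supported_device: bitmask of devices this encoder can drive
 * @caps: encoder capability flags
 *
 * If an encoder with the same enum is already registered, just adds the
 * new device bits to it.  Otherwise allocates a new amdgpu_encoder,
 * derives possible_crtcs from the crtc count and hooks up the DAC, DIG
 * or external encoder helper funcs based on the encoder object id.
 */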
3606 static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3607 				 uint32_t encoder_enum,
3608 				 uint32_t supported_device,
3609 				 u16 caps)
3610 {
3611 	struct drm_device *dev = adev->ddev;
3612 	struct drm_encoder *encoder;
3613 	struct amdgpu_encoder *amdgpu_encoder;
3614 
3615 	/* see if we already added it */
3616 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3617 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3618 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3619 			amdgpu_encoder->devices |= supported_device;
3620 			return;
3621 		}
3622 
3623 	}
3624 
3625 	/* add a new one */
3626 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3627 	if (!amdgpu_encoder)
3628 		return;
3629 
3630 	encoder = &amdgpu_encoder->base;
3631 	switch (adev->mode_info.num_crtc) {
3632 	case 1:
3633 		encoder->possible_crtcs = 0x1;
3634 		break;
3635 	case 2:
3636 	default:
3637 		encoder->possible_crtcs = 0x3;
3638 		break;
3639 	case 4:
3640 		encoder->possible_crtcs = 0xf;
3641 		break;
3642 	case 6:
3643 		encoder->possible_crtcs = 0x3f;
3644 		break;
3645 	}
3646 
3647 	amdgpu_encoder->enc_priv = NULL;
3648 
3649 	amdgpu_encoder->encoder_enum = encoder_enum;
3650 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3651 	amdgpu_encoder->devices = supported_device;
3652 	amdgpu_encoder->rmx_type = RMX_OFF;
3653 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3654 	amdgpu_encoder->is_ext_encoder = false;
3655 	amdgpu_encoder->caps = caps;
3656 
3657 	switch (amdgpu_encoder->encoder_id) {
3658 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3659 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3660 		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3661 				 DRM_MODE_ENCODER_DAC);
3662 		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3663 		break;
3664 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3665 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3666 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3667 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3668 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3669 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3670 			amdgpu_encoder->rmx_type = RMX_FULL;
3671 			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3672 					 DRM_MODE_ENCODER_LVDS);
3673 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3674 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3675 			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3676 					 DRM_MODE_ENCODER_DAC);
3677 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3678 		} else {
3679 			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3680 					 DRM_MODE_ENCODER_TMDS);
3681 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3682 		}
3683 		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3684 		break;
3685 	case ENCODER_OBJECT_ID_SI170B:
3686 	case ENCODER_OBJECT_ID_CH7303:
3687 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3688 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3689 	case ENCODER_OBJECT_ID_TITFP513:
3690 	case ENCODER_OBJECT_ID_VT1623:
3691 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3692 	case ENCODER_OBJECT_ID_TRAVIS:
3693 	case ENCODER_OBJECT_ID_NUTMEG:
3694 		/* these are handled by the primary encoders */
3695 		amdgpu_encoder->is_ext_encoder = true;
3696 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3697 			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3698 					 DRM_MODE_ENCODER_LVDS);
3699 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3700 			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3701 					 DRM_MODE_ENCODER_DAC);
3702 		else
3703 			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3704 					 DRM_MODE_ENCODER_TMDS);
3705 		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3706 		break;
3707 	}
3708 }
3709 
3710 static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3711 	.set_vga_render_state = &dce_v8_0_set_vga_render_state,
3712 	.bandwidth_update = &dce_v8_0_bandwidth_update,
3713 	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
3714 	.vblank_wait = &dce_v8_0_vblank_wait,
3715 	.is_display_hung = &dce_v8_0_is_display_hung,
3716 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3717 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3718 	.hpd_sense = &dce_v8_0_hpd_sense,
3719 	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3720 	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3721 	.page_flip = &dce_v8_0_page_flip,
3722 	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3723 	.add_encoder = &dce_v8_0_encoder_add,
3724 	.add_connector = &amdgpu_connector_add,
3725 	.stop_mc_access = &dce_v8_0_stop_mc_access,
3726 	.resume_mc_access = &dce_v8_0_resume_mc_access,
3727 };
3728 
3729 static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3730 {
3731 	if (adev->mode_info.funcs == NULL)
3732 		adev->mode_info.funcs = &dce_v8_0_display_funcs;
3733 }
3734 
3735 static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3736 	.set = dce_v8_0_set_crtc_interrupt_state,
3737 	.process = dce_v8_0_crtc_irq,
3738 };
3739 
3740 static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3741 	.set = dce_v8_0_set_pageflip_interrupt_state,
3742 	.process = dce_v8_0_pageflip_irq,
3743 };
3744 
3745 static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3746 	.set = dce_v8_0_set_hpd_interrupt_state,
3747 	.process = dce_v8_0_hpd_irq,
3748 };
3749 
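/**
 * dce_v8_0_set_irq_funcs - wire up the DCE 8.x interrupt sources
 *
 * @adev: amdgpu_device pointer
 *
 * Points the crtc, pageflip and hpd interrupt sources at the DCE 8.x
 * state-set and process callbacks defined above.
 */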
3750 static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3751 {
3752 	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3753 	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3754 
3755 	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3756 	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3757 
3758 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3759 	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3760 }
3761