/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"

#include "dce_v6_0.h"
#include "sid.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"

#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"

#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
	       reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
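 * With @async set the flip is armed at horizontal retrace rather than
 * at vblank, so it does not wait for the blanking period and may tear.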
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
				 int hpd)
{
	u32 tmp;

	if (hpd >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", hpd);
		return;
	}

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
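 * The interrupt polarity is armed against the current sense state so
 * that the next plug or unplug event raises an interrupt.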
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid
			 * breaking the aux dp channel on imac; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
		       RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		return 6;
	case CHIP_OLAND:
		return 2;
	default:
		return 0;
	}
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* Disable the CRTCs */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
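 * The NOOFCHAN field of MC_SHARED_CHMAP encodes the channel count
 * non-linearly; see the switch below for the mapping.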
 * Used for display watermark bandwidth calculations.
 * Returns the number of dram channels.
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations.
 * Returns the dram bandwidth in MBytes/s.
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations.
 * Returns the dram bandwidth for display in MBytes/s.
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
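 * The fixed-point math below works out to roughly sclk (in MHz) *
 * 32 bytes per clock * 0.8 return path efficiency.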
 * Used for display watermark bandwidth calculations.
 * Returns the data return bandwidth in MBytes/s.
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations.
 * Returns the dmif bandwidth in MBytes/s.
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations.
 * Returns the min available bandwidth in MBytes/s.
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations.
 * Returns the average available bandwidth in MBytes/s.
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
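	 * Average bandwidth = src_width * bytes_per_pixel * vsc / line_time,
	 * where line_time is the active plus blank time converted to us.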
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations.
 * Returns the latency watermark in ns.
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
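 * Each head is assumed to get an even 1/num_heads share of the dram
 * bandwidth allocated to the display.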
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
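 * Watermark A is programmed for the high (performance) clocks and
 * watermark B for the low (power saving) clocks.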
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min_t(u32, line_time, 65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min_t(u32, dce_v6_0_latency_watermark(&wm_high), 65535);
		/* set for low clocks */
		latency_watermark_b = min_t(u32, dce_v6_0_latency_watermark(&wm_low), 65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time...
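		 * The checks below fall back to PRIORITY_ALWAYS_ON whenever
		 * the mode does not fit in the computed bandwidth or cannot
		 * hide the memory latency.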
		 */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;

	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/* watermark setup */
/**
 * dce_v6_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 * @other_mode: the display mode of another display controller
 * that may be sharing the line buffer
 *
 * Set up the line buffer allocation for
 * the selected display controller (SI).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 * 0 - half lb
	 * 2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
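 * CRTCs are walked in pairs since each pair shares one line buffer.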
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
				  PORT_CONNECTIVITY))
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
			     dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	int interlace = 0;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;

	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u8 *sadb = NULL;
	int sad_count;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    HDMI_CONNECTION, 0);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    DP_CONNECTION, 0);

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    DP_CONNECTION, 1);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    HDMI_CONNECTION, 1);

	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, 5); /* stereo */

	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ?
			   AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	default:
		adev->mode_info.audio.num_pins = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.audio.num_pins = 2;
		break;
	}

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
				   uint32_t clock, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
			    bpc > 8 ?
			    0 : 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
					     struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct hdmi_avi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	uint8_t *payload = buffer + 3;
	uint8_t *header = buffer;
	ssize_t err;
	u32 tmp;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
			    HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
	u32 tmp;

	/*
	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
	 * Express [24MHz / target pixel clock] as an exact rational
	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
			    DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				    DCCG_AUDIO_DTO_SEL, 0);
	} else if (ENCODER_MODE_IS_DP(em)) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				    DCCG_AUDIO_DTO_SEL, 1);
	}
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
	} else if (ENCODER_MODE_IS_DP(em)) {
		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
	}
}

static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
amdgpu_encoder->enc_priv; 1648 u32 tmp; 1649 1650 tmp = RREG32(mmHDMI_GC + dig->afmt->offset); 1651 tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0); 1652 WREG32(mmHDMI_GC + dig->afmt->offset, tmp); 1653 } 1654 1655 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable) 1656 { 1657 struct drm_device *dev = encoder->dev; 1658 struct amdgpu_device *adev = drm_to_adev(dev); 1659 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1660 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1661 u32 tmp; 1662 1663 if (enable) { 1664 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1665 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); 1666 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); 1667 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); 1668 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); 1669 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1670 1671 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); 1672 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); 1673 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); 1674 1675 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1676 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); 1677 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1678 } else { 1679 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1680 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0); 1681 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0); 1682 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0); 1683 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0); 1684 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1685 1686 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1687 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0); 1688 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1689 } 1690 } 1691 1692 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable) 1693 { 1694 struct drm_device *dev = encoder->dev; 1695 struct amdgpu_device *adev = drm_to_adev(dev); 1696 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1697 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1698 u32 tmp; 1699 1700 if (enable) { 1701 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1702 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); 1703 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1704 1705 tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset); 1706 tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1); 1707 WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp); 1708 1709 tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset); 1710 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1); 1711 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1); 1712 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1); 1713 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); 1714 WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp); 1715 } else { 1716 WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0); 1717 } 1718 } 1719 1720 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder, 1721 struct drm_display_mode *mode) 1722 { 
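	/*
	 * Ordering used below: disable audio output and mute first,
	 * program the speaker allocation, SADs and latency fields from
	 * the connector's EDID, set up the audio clock DTO (plus the VBI
	 * and ACR packets for HDMI), configure the sample packets and
	 * pin, send the AVI infoframe, then unmute and re-enable audio.
	 */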
1723 struct drm_device *dev = encoder->dev; 1724 struct amdgpu_device *adev = drm_to_adev(dev); 1725 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1726 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1727 struct drm_connector *connector; 1728 struct drm_connector_list_iter iter; 1729 struct amdgpu_connector *amdgpu_connector = NULL; 1730 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); 1731 int bpc = 8; 1732 1733 if (!dig || !dig->afmt) 1734 return; 1735 1736 drm_connector_list_iter_begin(dev, &iter); 1737 drm_for_each_connector_iter(connector, &iter) { 1738 if (connector->encoder == encoder) { 1739 amdgpu_connector = to_amdgpu_connector(connector); 1740 break; 1741 } 1742 } 1743 drm_connector_list_iter_end(&iter); 1744 1745 if (!amdgpu_connector) { 1746 DRM_ERROR("Couldn't find encoder's connector\n"); 1747 return; 1748 } 1749 1750 if (!dig->afmt->enabled) 1751 return; 1752 1753 dig->afmt->pin = dce_v6_0_audio_get_pin(adev); 1754 if (!dig->afmt->pin) 1755 return; 1756 1757 if (encoder->crtc) { 1758 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1759 bpc = amdgpu_crtc->bpc; 1760 } 1761 1762 /* disable audio before setting up hw */ 1763 dce_v6_0_audio_enable(adev, dig->afmt->pin, false); 1764 1765 dce_v6_0_audio_set_mute(encoder, true); 1766 dce_v6_0_audio_write_speaker_allocation(encoder); 1767 dce_v6_0_audio_write_sad_regs(encoder); 1768 dce_v6_0_audio_write_latency_fields(encoder, mode); 1769 if (em == ATOM_ENCODER_MODE_HDMI) { 1770 dce_v6_0_audio_set_dto(encoder, mode->clock); 1771 dce_v6_0_audio_set_vbi_packet(encoder); 1772 dce_v6_0_audio_set_acr(encoder, mode->clock, bpc); 1773 } else if (ENCODER_MODE_IS_DP(em)) { 1774 dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10); 1775 } 1776 dce_v6_0_audio_set_packet(encoder); 1777 dce_v6_0_audio_select_pin(encoder); 1778 dce_v6_0_audio_set_avi_infoframe(encoder, mode); 1779 dce_v6_0_audio_set_mute(encoder, false); 1780 if (em == ATOM_ENCODER_MODE_HDMI) { 1781 dce_v6_0_audio_hdmi_enable(encoder, 1); 1782 } else if (ENCODER_MODE_IS_DP(em)) { 1783 dce_v6_0_audio_dp_enable(encoder, 1); 1784 } 1785 1786 /* enable audio after setting up hw */ 1787 dce_v6_0_audio_enable(adev, dig->afmt->pin, true); 1788 } 1789 1790 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable) 1791 { 1792 struct drm_device *dev = encoder->dev; 1793 struct amdgpu_device *adev = drm_to_adev(dev); 1794 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1795 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1796 1797 if (!dig || !dig->afmt) 1798 return; 1799 1800 /* Silent, r600_hdmi_enable will raise WARN for us */ 1801 if (enable && dig->afmt->enabled) 1802 return; 1803 1804 if (!enable && !dig->afmt->enabled) 1805 return; 1806 1807 if (!enable && dig->afmt->pin) { 1808 dce_v6_0_audio_enable(adev, dig->afmt->pin, false); 1809 dig->afmt->pin = NULL; 1810 } 1811 1812 dig->afmt->enabled = enable; 1813 1814 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", 1815 enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1816 } 1817 1818 static int dce_v6_0_afmt_init(struct amdgpu_device *adev) 1819 { 1820 int i, j; 1821 1822 for (i = 0; i < adev->mode_info.num_dig; i++) 1823 adev->mode_info.afmt[i] = NULL; 1824 1825 /* DCE6 has audio blocks tied to DIG encoders */ 1826 for (i = 0; i < adev->mode_info.num_dig; i++) { 1827 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1828 if (adev->mode_info.afmt[i]) { 1829 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1830 adev->mode_info.afmt[i]->id = i; 1831 } else { 1832 for (j = 0; j < i; j++) { 1833 kfree(adev->mode_info.afmt[j]); 1834 adev->mode_info.afmt[j] = NULL; 1835 } 1836 DRM_ERROR("Out of memory allocating afmt table\n"); 1837 return -ENOMEM; 1838 } 1839 } 1840 return 0; 1841 } 1842 1843 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev) 1844 { 1845 int i; 1846 1847 for (i = 0; i < adev->mode_info.num_dig; i++) { 1848 kfree(adev->mode_info.afmt[i]); 1849 adev->mode_info.afmt[i] = NULL; 1850 } 1851 } 1852 1853 static const u32 vga_control_regs[6] = 1854 { 1855 mmD1VGA_CONTROL, 1856 mmD2VGA_CONTROL, 1857 mmD3VGA_CONTROL, 1858 mmD4VGA_CONTROL, 1859 mmD5VGA_CONTROL, 1860 mmD6VGA_CONTROL, 1861 }; 1862 1863 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable) 1864 { 1865 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1866 struct drm_device *dev = crtc->dev; 1867 struct amdgpu_device *adev = drm_to_adev(dev); 1868 u32 vga_control; 1869 1870 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 1871 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0)); 1872 } 1873 1874 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable) 1875 { 1876 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1877 struct drm_device *dev = crtc->dev; 1878 struct amdgpu_device *adev = drm_to_adev(dev); 1879 1880 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 
1 : 0); 1881 } 1882 1883 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, 1884 struct drm_framebuffer *fb, 1885 int x, int y, int atomic) 1886 { 1887 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1888 struct drm_device *dev = crtc->dev; 1889 struct amdgpu_device *adev = drm_to_adev(dev); 1890 struct drm_framebuffer *target_fb; 1891 struct drm_gem_object *obj; 1892 struct amdgpu_bo *abo; 1893 uint64_t fb_location, tiling_flags; 1894 uint32_t fb_format, fb_pitch_pixels, pipe_config; 1895 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1896 u32 viewport_w, viewport_h; 1897 int r; 1898 bool bypass_lut = false; 1899 1900 /* no fb bound */ 1901 if (!atomic && !crtc->primary->fb) { 1902 DRM_DEBUG_KMS("No FB bound\n"); 1903 return 0; 1904 } 1905 1906 if (atomic) 1907 target_fb = fb; 1908 else 1909 target_fb = crtc->primary->fb; 1910 1911 /* If atomic, assume fb object is pinned & idle & fenced and 1912 * just update base pointers 1913 */ 1914 obj = target_fb->obj[0]; 1915 abo = gem_to_amdgpu_bo(obj); 1916 r = amdgpu_bo_reserve(abo, false); 1917 if (unlikely(r != 0)) 1918 return r; 1919 1920 if (!atomic) { 1921 abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1922 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); 1923 if (unlikely(r != 0)) { 1924 amdgpu_bo_unreserve(abo); 1925 return -EINVAL; 1926 } 1927 } 1928 fb_location = amdgpu_bo_gpu_offset(abo); 1929 1930 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1931 amdgpu_bo_unreserve(abo); 1932 1933 switch (target_fb->format->format) { 1934 case DRM_FORMAT_C8: 1935 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1936 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1937 break; 1938 case DRM_FORMAT_XRGB4444: 1939 case DRM_FORMAT_ARGB4444: 1940 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1941 (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1942 #ifdef __BIG_ENDIAN 1943 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1944 #endif 1945 break; 1946 case DRM_FORMAT_XRGB1555: 1947 case DRM_FORMAT_ARGB1555: 1948 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1949 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1950 #ifdef __BIG_ENDIAN 1951 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1952 #endif 1953 break; 1954 case DRM_FORMAT_BGRX5551: 1955 case DRM_FORMAT_BGRA5551: 1956 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1957 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1958 #ifdef __BIG_ENDIAN 1959 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1960 #endif 1961 break; 1962 case DRM_FORMAT_RGB565: 1963 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1964 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1965 #ifdef __BIG_ENDIAN 1966 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1967 #endif 1968 break; 1969 case DRM_FORMAT_XRGB8888: 1970 case DRM_FORMAT_ARGB8888: 1971 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1972 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1973 #ifdef __BIG_ENDIAN 1974 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1975 #endif 1976 break; 1977 case DRM_FORMAT_XRGB2101010: 1978 case DRM_FORMAT_ARGB2101010: 1979 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1980 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
		fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
			   (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	default:
		DRM_ERROR("Unsupported screen format %p4cc\n",
			  &target_fb->format->format);
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
	}

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);

	dce_v6_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by an 8 bpc fb.
Bypass the LUT 2055 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 2056 * retain the full precision throughout the pipeline. 2057 */ 2058 WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, 2059 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0), 2060 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK); 2061 2062 if (bypass_lut) 2063 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 2064 2065 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 2066 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 2067 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 2068 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 2069 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 2070 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 2071 2072 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; 2073 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 2074 2075 dce_v6_0_grph_enable(crtc, true); 2076 2077 WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 2078 target_fb->height); 2079 x &= ~3; 2080 y &= ~1; 2081 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 2082 (x << 16) | y); 2083 viewport_w = crtc->mode.hdisplay; 2084 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 2085 2086 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 2087 (viewport_w << 16) | viewport_h); 2088 2089 /* set pageflip to happen anywhere in vblank interval */ 2090 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 2091 2092 if (!atomic && fb && fb != crtc->primary->fb) { 2093 abo = gem_to_amdgpu_bo(fb->obj[0]); 2094 r = amdgpu_bo_reserve(abo, true); 2095 if (unlikely(r != 0)) 2096 return r; 2097 amdgpu_bo_unpin(abo); 2098 amdgpu_bo_unreserve(abo); 2099 } 2100 2101 /* Bytes per pixel may have changed */ 2102 dce_v6_0_bandwidth_update(adev); 2103 2104 return 0; 2105 2106 } 2107 2108 static void dce_v6_0_set_interleave(struct drm_crtc *crtc, 2109 struct drm_display_mode *mode) 2110 { 2111 struct drm_device *dev = crtc->dev; 2112 struct amdgpu_device *adev = drm_to_adev(dev); 2113 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2114 2115 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2116 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 2117 DATA_FORMAT__INTERLEAVE_EN_MASK); 2118 else 2119 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0); 2120 } 2121 2122 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) 2123 { 2124 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2125 struct drm_device *dev = crtc->dev; 2126 struct amdgpu_device *adev = drm_to_adev(dev); 2127 u16 *r, *g, *b; 2128 int i; 2129 2130 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2131 2132 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2133 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | 2134 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); 2135 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, 2136 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); 2137 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, 2138 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); 2139 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2140 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | 2141 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); 2142 2143 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2144 2145 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + 
amdgpu_crtc->crtc_offset, 0); 2146 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2147 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2148 2149 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2150 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 2151 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2152 2153 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2154 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2155 2156 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2157 r = crtc->gamma_store; 2158 g = r + crtc->gamma_size; 2159 b = g + crtc->gamma_size; 2160 for (i = 0; i < 256; i++) { 2161 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2162 ((*r++ & 0xffc0) << 14) | 2163 ((*g++ & 0xffc0) << 4) | 2164 (*b++ >> 6)); 2165 } 2166 2167 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2168 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | 2169 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | 2170 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) | 2171 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); 2172 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, 2173 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | 2174 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); 2175 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2176 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | 2177 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); 2178 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2179 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | 2180 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); 2181 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 2182 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); 2183 2184 2185 } 2186 2187 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder) 2188 { 2189 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2190 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2191 2192 switch (amdgpu_encoder->encoder_id) { 2193 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2194 return dig->linkb ? 1 : 0; 2195 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2196 return dig->linkb ? 3 : 2; 2197 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2198 return dig->linkb ? 5 : 4; 2199 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2200 return 6; 2201 default: 2202 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2203 return 0; 2204 } 2205 } 2206 2207 /** 2208 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc. 2209 * 2210 * @crtc: drm crtc 2211 * 2212 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2213 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2214 * monitors a dedicated PPLL must be used. If a particular board has 2215 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2216 * as there is no need to program the PLL itself. If we are not able to 2217 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2218 * avoid messing up an existing monitor. 
 */
static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	u32 pll_in_use;
	int pll;

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		else
			return ATOM_PPLL0;
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}

	/* PPLL1 and PPLL2 */
	pll_in_use = amdgpu_pll_get_use_mask(crtc);
	if (!(pll_in_use & (1 << ATOM_PPLL2)))
		return ATOM_PPLL2;
	if (!(pll_in_use & (1 << ATOM_PPLL1)))
		return ATOM_PPLL1;
	DRM_ERROR("unable to allocate a PPLL\n");
	return ATOM_PPLL_INVALID;
}

static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	else
		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}

static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);

	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);

	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(amdgpu_crtc->cursor_addr));
	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(amdgpu_crtc->cursor_addr));

	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
	       CUR_CONTROL__CURSOR_EN_MASK |
	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
				       int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int xorigin = 0, yorigin = 0;

	int w = amdgpu_crtc->cursor_width;

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	/* avivo cursors are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((w - 1) << 16) |
(amdgpu_crtc->cursor_height - 1)); 2323 2324 return 0; 2325 } 2326 2327 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc, 2328 int x, int y) 2329 { 2330 int ret; 2331 2332 dce_v6_0_lock_cursor(crtc, true); 2333 ret = dce_v6_0_cursor_move_locked(crtc, x, y); 2334 dce_v6_0_lock_cursor(crtc, false); 2335 2336 return ret; 2337 } 2338 2339 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, 2340 struct drm_file *file_priv, 2341 uint32_t handle, 2342 uint32_t width, 2343 uint32_t height, 2344 int32_t hot_x, 2345 int32_t hot_y) 2346 { 2347 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2348 struct drm_gem_object *obj; 2349 struct amdgpu_bo *aobj; 2350 int ret; 2351 2352 if (!handle) { 2353 /* turn off cursor */ 2354 dce_v6_0_hide_cursor(crtc); 2355 obj = NULL; 2356 goto unpin; 2357 } 2358 2359 if ((width > amdgpu_crtc->max_cursor_width) || 2360 (height > amdgpu_crtc->max_cursor_height)) { 2361 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2362 return -EINVAL; 2363 } 2364 2365 obj = drm_gem_object_lookup(file_priv, handle); 2366 if (!obj) { 2367 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2368 return -ENOENT; 2369 } 2370 2371 aobj = gem_to_amdgpu_bo(obj); 2372 ret = amdgpu_bo_reserve(aobj, false); 2373 if (ret != 0) { 2374 drm_gem_object_put(obj); 2375 return ret; 2376 } 2377 2378 aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 2379 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 2380 amdgpu_bo_unreserve(aobj); 2381 if (ret) { 2382 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2383 drm_gem_object_put(obj); 2384 return ret; 2385 } 2386 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 2387 2388 dce_v6_0_lock_cursor(crtc, true); 2389 2390 if (width != amdgpu_crtc->cursor_width || 2391 height != amdgpu_crtc->cursor_height || 2392 hot_x != amdgpu_crtc->cursor_hot_x || 2393 hot_y != amdgpu_crtc->cursor_hot_y) { 2394 int x, y; 2395 2396 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2397 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2398 2399 dce_v6_0_cursor_move_locked(crtc, x, y); 2400 2401 amdgpu_crtc->cursor_width = width; 2402 amdgpu_crtc->cursor_height = height; 2403 amdgpu_crtc->cursor_hot_x = hot_x; 2404 amdgpu_crtc->cursor_hot_y = hot_y; 2405 } 2406 2407 dce_v6_0_show_cursor(crtc); 2408 dce_v6_0_lock_cursor(crtc, false); 2409 2410 unpin: 2411 if (amdgpu_crtc->cursor_bo) { 2412 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2413 ret = amdgpu_bo_reserve(aobj, true); 2414 if (likely(ret == 0)) { 2415 amdgpu_bo_unpin(aobj); 2416 amdgpu_bo_unreserve(aobj); 2417 } 2418 drm_gem_object_put(amdgpu_crtc->cursor_bo); 2419 } 2420 2421 amdgpu_crtc->cursor_bo = obj; 2422 return 0; 2423 } 2424 2425 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) 2426 { 2427 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2428 2429 if (amdgpu_crtc->cursor_bo) { 2430 dce_v6_0_lock_cursor(crtc, true); 2431 2432 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2433 amdgpu_crtc->cursor_y); 2434 2435 dce_v6_0_show_cursor(crtc); 2436 dce_v6_0_lock_cursor(crtc, false); 2437 } 2438 } 2439 2440 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2441 u16 *blue, uint32_t size, 2442 struct drm_modeset_acquire_ctx *ctx) 2443 { 2444 dce_v6_0_crtc_load_lut(crtc); 2445 2446 return 0; 2447 } 2448 2449 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc) 2450 { 2451 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2452 
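	/*
	 * The amdgpu_crtc and its trailing connector array were allocated
	 * as a single block in dce_v6_0_crtc_init(), so the one kfree()
	 * below releases both.
	 */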
drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
	.cursor_move = dce_v6_0_crtc_cursor_move,
	.gamma_set = dce_v6_0_crtc_gamma_set,
	.set_config = amdgpu_display_crtc_set_config,
	.destroy = dce_v6_0_crtc_destroy,
	.page_flip_target = amdgpu_display_crtc_page_flip_target,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = amdgpu_enable_vblank_kms,
	.disable_vblank = amdgpu_disable_vblank_kms,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_display_crtc_idx_to_irq_type(adev,
							   amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v6_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled)
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_dpm_compute_clocks(adev);
}

static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_atom_ss ss;
	int i;

	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_bo *abo;

		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r)) {
			DRM_ERROR("failed to reserve abo before unpin\n");
		} else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v6_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll, don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable
the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v6_0_cursor_reset(crtc);
	/* update the hw mode for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
	.dpms = dce_v6_0_crtc_dpms,
	.mode_fixup = dce_v6_0_crtc_mode_fixup,
	.mode_set = dce_v6_0_crtc_mode_set,
	.mode_set_base = dce_v6_0_crtc_set_base,
	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
	.prepare = dce_v6_0_crtc_prepare,
	.commit = dce_v6_0_crtc_commit,
	.disable = dce_v6_0_crtc_disable,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dce_v6_0_panic_flush(struct drm_plane *plane)
{
	struct drm_framebuffer *fb;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_device *adev;
uint32_t fb_format; 2663 2664 if (!plane->fb) 2665 return; 2666 2667 fb = plane->fb; 2668 amdgpu_crtc = to_amdgpu_crtc(plane->crtc); 2669 adev = drm_to_adev(fb->dev); 2670 2671 /* Disable DC tiling */ 2672 fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset); 2673 fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK; 2674 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 2675 2676 } 2677 2678 static const struct drm_plane_helper_funcs dce_v6_0_drm_primary_plane_helper_funcs = { 2679 .get_scanout_buffer = amdgpu_display_get_scanout_buffer, 2680 .panic_flush = dce_v6_0_panic_flush, 2681 }; 2682 2683 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) 2684 { 2685 struct amdgpu_crtc *amdgpu_crtc; 2686 2687 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2688 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2689 if (amdgpu_crtc == NULL) 2690 return -ENOMEM; 2691 2692 drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs); 2693 2694 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2695 amdgpu_crtc->crtc_id = index; 2696 adev->mode_info.crtcs[index] = amdgpu_crtc; 2697 2698 amdgpu_crtc->max_cursor_width = CURSOR_WIDTH; 2699 amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT; 2700 adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2701 adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2702 2703 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; 2704 2705 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2706 amdgpu_crtc->adjusted_clock = 0; 2707 amdgpu_crtc->encoder = NULL; 2708 amdgpu_crtc->connector = NULL; 2709 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs); 2710 drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v6_0_drm_primary_plane_helper_funcs); 2711 2712 return 0; 2713 } 2714 2715 static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block) 2716 { 2717 struct amdgpu_device *adev = ip_block->adev; 2718 2719 adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg; 2720 adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg; 2721 2722 dce_v6_0_set_display_funcs(adev); 2723 2724 adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev); 2725 2726 switch (adev->asic_type) { 2727 case CHIP_TAHITI: 2728 case CHIP_PITCAIRN: 2729 case CHIP_VERDE: 2730 adev->mode_info.num_hpd = 6; 2731 adev->mode_info.num_dig = 6; 2732 break; 2733 case CHIP_OLAND: 2734 adev->mode_info.num_hpd = 2; 2735 adev->mode_info.num_dig = 2; 2736 break; 2737 default: 2738 return -EINVAL; 2739 } 2740 2741 dce_v6_0_set_irq_funcs(adev); 2742 2743 return 0; 2744 } 2745 2746 static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block) 2747 { 2748 int r, i; 2749 struct amdgpu_device *adev = ip_block->adev; 2750 2751 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2752 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); 2753 if (r) 2754 return r; 2755 } 2756 2757 for (i = 8; i < 20; i += 2) { 2758 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2759 if (r) 2760 return r; 2761 } 2762 2763 /* HPD hotplug */ 2764 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq); 2765 if (r) 2766 return r; 2767 2768 adev->mode_info.mode_config_initialized = true; 2769 2770 adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; 2771 adev_to_drm(adev)->mode_config.async_page_flip = true; 2772 adev_to_drm(adev)->mode_config.max_width = 16384; 2773 adev_to_drm(adev)->mode_config.max_height = 16384; 2774 
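	/*
	 * Defaults consumed by the fbdev helper; DCE6 predates DRM format
	 * modifiers, so only the legacy tiling flags are accepted.
	 */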
adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v6_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
		amdgpu_display_print_display_setup(adev_to_drm(adev));
	else
		return -EINVAL;

	/* setup afmt */
	r = dce_v6_0_afmt_init(adev);
	if (r)
		return r;

	r = dce_v6_0_audio_init(adev);
	if (r)
		return r;

	/* Disable vblank IRQs aggressively for power-saving */
	/* XXX: can this be enabled for DC? */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
	if (r)
		return r;

	/* Pre-DCE11 */
	INIT_DELAYED_WORK(&adev->hotplug_work,
			  amdgpu_display_hotplug_work_func);

	drm_kms_helper_poll_init(adev_to_drm(adev));

	return r;
}

static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	drm_edid_free(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev_to_drm(adev));

	dce_v6_0_audio_fini(adev);
	dce_v6_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev_to_drm(adev));
	adev->mode_info.mode_config_initialized = false;

	return 0;
}

static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int i;
	struct amdgpu_device *adev = ip_block->adev;

	/* disable vga render */
	dce_v6_0_set_vga_render_state(adev, false);
	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

	/* initialize hpd */
	dce_v6_0_hpd_init(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_init(adev);

	return 0;
}

static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	int i;
	struct amdgpu_device *adev = ip_block->adev;

	dce_v6_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_fini(adev);

	flush_delayed_work(&adev->hotplug_work);

	return 0;
}

static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_display_suspend_helper(adev);
	if (r)
		return r;
	adev->mode_info.bl_level =
		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

	return dce_v6_0_hw_fini(ip_block);
}

static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
							   adev->mode_info.bl_level);

	ret = dce_v6_0_hw_init(ip_block);

	/* 
turn on the BL */ 2906 if (adev->mode_info.bl_encoder) { 2907 u8 bl_level = amdgpu_display_backlight_get_level(adev, 2908 adev->mode_info.bl_encoder); 2909 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 2910 bl_level); 2911 } 2912 if (ret) 2913 return ret; 2914 2915 return amdgpu_display_resume_helper(adev); 2916 } 2917 2918 static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block) 2919 { 2920 return true; 2921 } 2922 2923 static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) 2924 { 2925 u32 srbm_soft_reset = 0, tmp; 2926 struct amdgpu_device *adev = ip_block->adev; 2927 2928 if (dce_v6_0_is_display_hung(adev)) 2929 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 2930 2931 if (srbm_soft_reset) { 2932 tmp = RREG32(mmSRBM_SOFT_RESET); 2933 tmp |= srbm_soft_reset; 2934 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 2935 WREG32(mmSRBM_SOFT_RESET, tmp); 2936 tmp = RREG32(mmSRBM_SOFT_RESET); 2937 2938 udelay(50); 2939 2940 tmp &= ~srbm_soft_reset; 2941 WREG32(mmSRBM_SOFT_RESET, tmp); 2942 tmp = RREG32(mmSRBM_SOFT_RESET); 2943 2944 /* Wait a little for things to settle down */ 2945 udelay(50); 2946 } 2947 return 0; 2948 } 2949 2950 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 2951 int crtc, 2952 enum amdgpu_interrupt_state state) 2953 { 2954 u32 reg_block, interrupt_mask; 2955 2956 if (crtc >= adev->mode_info.num_crtc) { 2957 DRM_DEBUG("invalid crtc %d\n", crtc); 2958 return; 2959 } 2960 2961 switch (crtc) { 2962 case 0: 2963 reg_block = CRTC0_REGISTER_OFFSET; 2964 break; 2965 case 1: 2966 reg_block = CRTC1_REGISTER_OFFSET; 2967 break; 2968 case 2: 2969 reg_block = CRTC2_REGISTER_OFFSET; 2970 break; 2971 case 3: 2972 reg_block = CRTC3_REGISTER_OFFSET; 2973 break; 2974 case 4: 2975 reg_block = CRTC4_REGISTER_OFFSET; 2976 break; 2977 case 5: 2978 reg_block = CRTC5_REGISTER_OFFSET; 2979 break; 2980 default: 2981 DRM_DEBUG("invalid crtc %d\n", crtc); 2982 return; 2983 } 2984 2985 switch (state) { 2986 case AMDGPU_IRQ_STATE_DISABLE: 2987 interrupt_mask = RREG32(mmINT_MASK + reg_block); 2988 interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK; 2989 WREG32(mmINT_MASK + reg_block, interrupt_mask); 2990 break; 2991 case AMDGPU_IRQ_STATE_ENABLE: 2992 interrupt_mask = RREG32(mmINT_MASK + reg_block); 2993 interrupt_mask |= INT_MASK__VBLANK_INT_MASK; 2994 WREG32(mmINT_MASK + reg_block, interrupt_mask); 2995 break; 2996 default: 2997 break; 2998 } 2999 } 3000 3001 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, 3002 int crtc, 3003 enum amdgpu_interrupt_state state) 3004 { 3005 3006 } 3007 3008 static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev, 3009 struct amdgpu_irq_src *src, 3010 unsigned hpd, 3011 enum amdgpu_interrupt_state state) 3012 { 3013 u32 dc_hpd_int_cntl; 3014 3015 if (hpd >= adev->mode_info.num_hpd) { 3016 DRM_DEBUG("invalid hpd %d\n", hpd); 3017 return 0; 3018 } 3019 3020 switch (state) { 3021 case AMDGPU_IRQ_STATE_DISABLE: 3022 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 3023 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; 3024 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl); 3025 break; 3026 case AMDGPU_IRQ_STATE_ENABLE: 3027 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 3028 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; 3029 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl); 3030 break; 3031 default: 3032 break; 3033 } 3034 3035 return 0; 3036 } 3037 3038 
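/*
 * Route the per-crtc software interrupt types (AMDGPU_CRTC_IRQ_VBLANK1..6
 * and AMDGPU_CRTC_IRQ_VLINE1..6) to the matching hardware enable bits;
 * dce_v6_0_set_crtc_vline_interrupt_state() is currently a no-op on DCE6.
 */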
static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev, 3039 struct amdgpu_irq_src *src, 3040 unsigned type, 3041 enum amdgpu_interrupt_state state) 3042 { 3043 switch (type) { 3044 case AMDGPU_CRTC_IRQ_VBLANK1: 3045 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state); 3046 break; 3047 case AMDGPU_CRTC_IRQ_VBLANK2: 3048 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state); 3049 break; 3050 case AMDGPU_CRTC_IRQ_VBLANK3: 3051 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state); 3052 break; 3053 case AMDGPU_CRTC_IRQ_VBLANK4: 3054 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state); 3055 break; 3056 case AMDGPU_CRTC_IRQ_VBLANK5: 3057 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state); 3058 break; 3059 case AMDGPU_CRTC_IRQ_VBLANK6: 3060 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state); 3061 break; 3062 case AMDGPU_CRTC_IRQ_VLINE1: 3063 dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state); 3064 break; 3065 case AMDGPU_CRTC_IRQ_VLINE2: 3066 dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state); 3067 break; 3068 case AMDGPU_CRTC_IRQ_VLINE3: 3069 dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state); 3070 break; 3071 case AMDGPU_CRTC_IRQ_VLINE4: 3072 dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state); 3073 break; 3074 case AMDGPU_CRTC_IRQ_VLINE5: 3075 dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state); 3076 break; 3077 case AMDGPU_CRTC_IRQ_VLINE6: 3078 dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state); 3079 break; 3080 default: 3081 break; 3082 } 3083 return 0; 3084 } 3085 3086 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev, 3087 struct amdgpu_irq_src *source, 3088 struct amdgpu_iv_entry *entry) 3089 { 3090 unsigned crtc = entry->src_id - 1; 3091 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3092 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, 3093 crtc); 3094 3095 switch (entry->src_data[0]) { 3096 case 0: /* vblank */ 3097 if (disp_int & interrupt_status_offsets[crtc].vblank) 3098 WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK); 3099 else 3100 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3101 3102 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3103 drm_handle_vblank(adev_to_drm(adev), crtc); 3104 } 3105 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3106 break; 3107 case 1: /* vline */ 3108 if (disp_int & interrupt_status_offsets[crtc].vline) 3109 WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK); 3110 else 3111 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3112 3113 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3114 break; 3115 default: 3116 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 3117 break; 3118 } 3119 3120 return 0; 3121 } 3122 3123 static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev, 3124 struct amdgpu_irq_src *src, 3125 unsigned type, 3126 enum amdgpu_interrupt_state state) 3127 { 3128 u32 reg; 3129 3130 if (type >= adev->mode_info.num_crtc) { 3131 DRM_ERROR("invalid pageflip crtc %d\n", type); 3132 return -EINVAL; 3133 } 3134 3135 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 3136 if (state == AMDGPU_IRQ_STATE_DISABLE) 3137 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3138 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3139 else 3140 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3141 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3142 3143 return 0; 3144 } 3145 3146 static int dce_v6_0_pageflip_irq(struct amdgpu_device 
*adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;

	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask;
	unsigned hpd;

	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		return 0;
	}

	hpd = entry->src_data[0];
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		dce_v6_0_hpd_int_ack(adev, hpd);
		schedule_delayed_work(&adev->hotplug_work, 0);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
	.name = "dce_v6_0",
	.early_init = dce_v6_0_early_init,
	.sw_init = dce_v6_0_sw_init,
	.sw_fini = dce_v6_0_sw_fini,
	.hw_init = dce_v6_0_hw_init,
	.hw_fini = dce_v6_0_hw_fini,
	.suspend = dce_v6_0_suspend,
	.resume = dce_v6_0_resume,
	.is_idle = dce_v6_0_is_idle,
	.soft_reset = dce_v6_0_soft_reset,
	.set_clockgating_state = dce_v6_0_set_clockgating_state,
	.set_powergating_state = dce_v6_0_set_powergating_state,
};

static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
				      struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);

	amdgpu_encoder->pixel_clock =
adjusted_mode->clock; 3259 3260 /* need to call this here rather than in prepare() since we need some crtc info */ 3261 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3262 3263 /* set scaler clears this on some chips */ 3264 dce_v6_0_set_interleave(encoder->crtc, mode); 3265 3266 if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) { 3267 dce_v6_0_afmt_enable(encoder, true); 3268 dce_v6_0_afmt_setmode(encoder, adjusted_mode); 3269 } 3270 } 3271 3272 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder) 3273 { 3274 struct amdgpu_device *adev = drm_to_adev(encoder->dev); 3275 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3276 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 3277 3278 if ((amdgpu_encoder->active_device & 3279 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || 3280 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != 3281 ENCODER_OBJECT_ID_NONE)) { 3282 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 3283 if (dig) { 3284 dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder); 3285 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) 3286 dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; 3287 } 3288 } 3289 3290 amdgpu_atombios_scratch_regs_lock(adev, true); 3291 3292 if (connector) { 3293 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 3294 3295 /* select the clock/data port if it uses a router */ 3296 if (amdgpu_connector->router.cd_valid) 3297 amdgpu_i2c_router_select_cd_port(amdgpu_connector); 3298 3299 /* turn eDP panel on for mode set */ 3300 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3301 amdgpu_atombios_encoder_set_edp_panel_power(connector, 3302 ATOM_TRANSMITTER_ACTION_POWER_ON); 3303 } 3304 3305 /* this is needed for the pll/ss setup to work correctly in some cases */ 3306 amdgpu_atombios_encoder_set_crtc_source(encoder); 3307 /* set up the FMT blocks */ 3308 dce_v6_0_program_fmt(encoder); 3309 } 3310 3311 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder) 3312 { 3313 struct drm_device *dev = encoder->dev; 3314 struct amdgpu_device *adev = drm_to_adev(dev); 3315 3316 /* need to call this here as we need the crtc set up */ 3317 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 3318 amdgpu_atombios_scratch_regs_lock(adev, false); 3319 } 3320 3321 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder) 3322 { 3323 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3324 struct amdgpu_encoder_atom_dig *dig; 3325 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); 3326 3327 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3328 3329 if (amdgpu_atombios_encoder_is_digital(encoder)) { 3330 if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) 3331 dce_v6_0_afmt_enable(encoder, false); 3332 dig = amdgpu_encoder->enc_priv; 3333 dig->dig_encoder = -1; 3334 } 3335 amdgpu_encoder->active_device = 0; 3336 } 3337 3338 /* these are handled by the primary encoders */ 3339 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder) 3340 { 3341 3342 } 3343 3344 static void dce_v6_0_ext_commit(struct drm_encoder *encoder) 3345 { 3346 3347 } 3348 3349 static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder, 3350 struct drm_display_mode *mode, 3351 struct drm_display_mode *adjusted_mode) 3352 { 3353 3354 } 3355 3356 static void dce_v6_0_ext_disable(struct drm_encoder *encoder) 3357 { 3358 3359 } 3360 3361 static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, 
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
			dce_v6_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
{
}

static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
{
}

static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
}

static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
{
}

static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}

static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
	.dpms = dce_v6_0_ext_dpms,
	.mode_fixup = dce_v6_0_ext_mode_fixup,
	.prepare = dce_v6_0_ext_prepare,
	.mode_set = dce_v6_0_ext_mode_set,
	.commit = dce_v6_0_ext_commit,
	.disable = dce_v6_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.disable = dce_v6_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
	.destroy = dce_v6_0_encoder_destroy,
};
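/**
 * dce_v6_0_encoder_add - add an encoder for a BIOS object table entry
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the BIOS object table
 * @supported_device: bitmask of ATOM_DEVICE_* the encoder can drive
 * @caps: encoder capability flags
 *
 * If an encoder with the same enum was already registered, only its
 * supported-device mask is extended. Otherwise a new amdgpu_encoder is
 * allocated, its possible_crtcs mask is derived from the number of CRTCs,
 * and the DAC, DIG or external-encoder helper vtable is attached based on
 * the encoder object id.
 */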
static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;
	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
	.bandwidth_update = &dce_v6_0_bandwidth_update,
	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v6_0_hpd_sense,
	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
	.page_flip = &dce_v6_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v6_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
};

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
	adev->mode_info.funcs = &dce_v6_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
	.set = dce_v6_0_set_crtc_irq_state,
	.process = dce_v6_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
	.set = dce_v6_0_set_pageflip_irq_state,
	.process = dce_v6_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
	.set = dce_v6_0_set_hpd_irq_state,
	.process = dce_v6_0_hpd_irq,
};
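/**
 * dce_v6_0_set_irq_funcs - wire up the DCE interrupt sources
 *
 * @adev: amdgpu_device pointer
 *
 * Registers the vtables for the CRTC (vblank/vline), pageflip and HPD
 * interrupt sources and sizes each source's num_types from the CRTC and
 * HPD counts discovered for this chip.
 */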
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.num_crtc > 0)
		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	else
		adev->crtc_irq.num_types = 0;
	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v6_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 4,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};
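/*
 * Usage sketch (illustrative; this registration lives in the SoC setup
 * code, not in this file): SI parts with a DCE 6.x display block are
 * expected to add one of the ip_block versions above during IP discovery,
 * along the lines of
 *
 *	amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
 *
 * after which the amd_ip_funcs above drive init/fini, suspend/resume and
 * gating for the display controller.
 */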