Lines matching "vm", "active", "channels" in drivers/gpu/drm/radeon/si.c
1210 switch (rdev->family) {
1278 * si_get_allowed_info_register - fetch the register for the info ioctl
1284 * Returns 0 for success or -EINVAL for an invalid register
1303 return -EINVAL;
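
The kdoc above describes a plain allow-list: only whitelisted register offsets may be read through the info ioctl, and anything else gets -EINVAL. A minimal userspace sketch of that pattern, assuming a made-up table and mock register read (the real driver whitelists specific GRBM/SRBM status registers):

#include <stdint.h>

#define EINVAL 22

/* hypothetical allow-list; offsets here are placeholders */
static const uint32_t allowed_regs[] = { 0x8010, 0x8014, 0x0e50 };

static uint32_t mock_rreg32(uint32_t reg) { return reg ^ 0xdeadbeefu; } /* stand-in for RREG32 */

int get_allowed_info_register(uint32_t reg, uint32_t *val)
{
	for (unsigned i = 0; i < sizeof(allowed_regs) / sizeof(allowed_regs[0]); i++) {
		if (allowed_regs[i] == reg) {
			*val = mock_rreg32(reg);	/* only whitelisted offsets are read */
			return 0;
		}
	}
	return -EINVAL;				/* anything else is rejected */
}
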
1311 * si_get_xclk - get the xclk
1320 u32 reference_clock = rdev->clock.spll.reference_freq;
1560 if (!rdev->mc_fw)
1561 return -EINVAL;
1563 if (rdev->new_fw) {
1565 (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
1567 radeon_ucode_print_mc_hdr(&hdr->header);
1568 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
1570 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
1571 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1573 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1575 ucode_size = rdev->mc_fw->size / 4;
1577 switch (rdev->family) {
1600 fw_data = (const __be32 *)rdev->mc_fw->data;
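
Both firmware paths above reduce to the same arithmetic: the new-style path takes a byte count and offset from a little-endian header and converts bytes to dwords, while the legacy path (rdev->mc_fw->size / 4) treats the whole big-endian blob as payload. A self-contained sketch of the header math, assuming a simplified two-field header (the real radeon ucode headers carry more fields):

#include <stdint.h>
#include <stddef.h>

struct ucode_header {			/* simplified; real headers also carry version info */
	uint32_t ucode_size_bytes;	/* stored little-endian in the blob */
	uint32_t ucode_array_offset_bytes;
};

static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Return the ucode payload and its length in dwords, like the new_fw path. */
static const uint8_t *ucode_payload(const uint8_t *blob, size_t *ndwords)
{
	uint32_t size   = le32(blob + offsetof(struct ucode_header, ucode_size_bytes));
	uint32_t offset = le32(blob + offsetof(struct ucode_header, ucode_array_offset_bytes));

	*ndwords = size / 4;		/* MC microcode is written one dword at a time */
	return blob + offset;
}
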
1612 if (rdev->new_fw) {
1622 if (rdev->new_fw)
1628 /* put the engine back into the active state */
1634 for (i = 0; i < rdev->usec_timeout; i++) {
1639 for (i = 0; i < rdev->usec_timeout; i++) {
1664 switch (rdev->family) {
1678 if ((rdev->pdev->revision == 0x81) &&
1679 ((rdev->pdev->device == 0x6810) ||
1680 (rdev->pdev->device == 0x6811)))
1693 if (((rdev->pdev->device == 0x6820) &&
1694 ((rdev->pdev->revision == 0x81) ||
1695 (rdev->pdev->revision == 0x83))) ||
1696 ((rdev->pdev->device == 0x6821) &&
1697 ((rdev->pdev->revision == 0x83) ||
1698 (rdev->pdev->revision == 0x87))) ||
1699 ((rdev->pdev->revision == 0x87) &&
1700 ((rdev->pdev->device == 0x6823) ||
1701 (rdev->pdev->device == 0x682b))))
1714 if (((rdev->pdev->revision == 0x81) &&
1715 ((rdev->pdev->device == 0x6600) ||
1716 (rdev->pdev->device == 0x6604) ||
1717 (rdev->pdev->device == 0x6605) ||
1718 (rdev->pdev->device == 0x6610))) ||
1719 ((rdev->pdev->revision == 0x83) &&
1720 (rdev->pdev->device == 0x6610)))
1732 if (((rdev->pdev->revision == 0x81) &&
1733 (rdev->pdev->device == 0x6660)) ||
1734 ((rdev->pdev->revision == 0x83) &&
1735 ((rdev->pdev->device == 0x6660) ||
1736 (rdev->pdev->device == 0x6663) ||
1737 (rdev->pdev->device == 0x6665) ||
1738 (rdev->pdev->device == 0x6667))))
1740 else if ((rdev->pdev->revision == 0xc3) &&
1741 (rdev->pdev->device == 0x6665))
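
The nested device/revision tests above select a "banked" firmware variant for specific board SKUs. The same decision is easier to audit as a quirk table; a hedged sketch using a representative subset of the pairs tested above (the helper name and struct are invented):

#include <stdint.h>
#include <stdbool.h>

struct banked_quirk { uint16_t device; uint8_t revision; };

/* representative subset of the device/revision pairs tested above */
static const struct banked_quirk banked_quirks[] = {
	{ 0x6810, 0x81 }, { 0x6811, 0x81 },
	{ 0x6820, 0x81 }, { 0x6820, 0x83 },
	{ 0x6821, 0x83 }, { 0x6821, 0x87 },
	{ 0x6823, 0x87 }, { 0x682b, 0x87 },
	{ 0x6600, 0x81 }, { 0x6610, 0x83 },
	{ 0x6660, 0x81 }, { 0x6665, 0xc3 },
};

static bool needs_banked_fw(uint16_t device, uint8_t revision)
{
	for (unsigned i = 0; i < sizeof(banked_quirks) / sizeof(banked_quirks[0]); i++)
		if (banked_quirks[i].device == device &&
		    banked_quirks[i].revision == revision)
			return true;
	return false;
}
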
1762 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1765 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1768 if (rdev->pfp_fw->size != pfp_req_size) {
1770 rdev->pfp_fw->size, fw_name);
1771 err = -EINVAL;
1775 err = radeon_ucode_validate(rdev->pfp_fw);
1786 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1789 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1792 if (rdev->me_fw->size != me_req_size) {
1794 rdev->me_fw->size, fw_name);
1795 err = -EINVAL;
1798 err = radeon_ucode_validate(rdev->me_fw);
1809 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1812 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1815 if (rdev->ce_fw->size != ce_req_size) {
1817 rdev->ce_fw->size, fw_name);
1818 err = -EINVAL;
1821 err = radeon_ucode_validate(rdev->ce_fw);
1832 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1835 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1838 if (rdev->rlc_fw->size != rlc_req_size) {
1840 rdev->rlc_fw->size, fw_name);
1841 err = -EINVAL;
1844 err = radeon_ucode_validate(rdev->rlc_fw);
1858 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1861 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1864 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1868 if ((rdev->mc_fw->size != mc_req_size) &&
1869 (rdev->mc_fw->size != mc2_req_size)) {
1871 rdev->mc_fw->size, fw_name);
1872 err = -EINVAL;
1874 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1876 err = radeon_ucode_validate(rdev->mc_fw);
1892 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1895 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1898 release_firmware(rdev->smc_fw);
1899 rdev->smc_fw = NULL;
1901 } else if (rdev->smc_fw->size != smc_req_size) {
1903 rdev->smc_fw->size, fw_name);
1904 err = -EINVAL;
1907 err = radeon_ucode_validate(rdev->smc_fw);
1918 rdev->new_fw = false;
1921 err = -EINVAL;
1923 rdev->new_fw = true;
1927 if (err != -EINVAL)
1930 release_firmware(rdev->pfp_fw);
1931 rdev->pfp_fw = NULL;
1932 release_firmware(rdev->me_fw);
1933 rdev->me_fw = NULL;
1934 release_firmware(rdev->ce_fw);
1935 rdev->ce_fw = NULL;
1936 release_firmware(rdev->rlc_fw);
1937 rdev->rlc_fw = NULL;
1938 release_firmware(rdev->mc_fw);
1939 rdev->mc_fw = NULL;
1940 release_firmware(rdev->smc_fw);
1941 rdev->smc_fw = NULL;
1953 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1960 * 0 - half lb
1961 * 2 - whole lb, other crtc must be disabled
1965 * non-linked crtcs for maximum line buffer allocation.
1967 if (radeon_crtc->base.enabled && mode) {
1980 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1985 for (i = 0; i < rdev->usec_timeout; i++) {
1992 if (radeon_crtc->base.enabled && mode) {
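
The split policy in the comment above is: an enabled CRTC whose paired partner is also active gets half of the shared line buffer, an unpaired one gets the whole buffer, and a disabled controller gets none. A sketch of that decision, with the buffer size left as a parameter (the hardware value is programmed via DC_LB_MEMORY_SPLIT, not computed like this):

#include <stdbool.h>

/* Split the shared line buffer between a pair of CRTCs:
 * mode 0 - half lb each; mode 2 - whole lb, partner must be disabled. */
static unsigned lb_bytes_for_crtc(bool this_enabled, bool partner_enabled,
				  unsigned whole_lb_bytes)
{
	if (!this_enabled)
		return 0;				/* disabled crtc uses no lb */
	return partner_enabled ? whole_lb_bytes / 2	/* split mode 0 */
			       : whole_lb_bytes;	/* split mode 2 */
}
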
2034 u32 dram_channels; /* number of dram channels */
2039 u32 active_time; /* active display time in ns */
2043 u32 num_heads; /* number of active crtcs */
2057 yclk.full = dfixed_const(wm->yclk);
2059 dram_channels.full = dfixed_const(wm->dram_channels * 4);
2077 yclk.full = dfixed_const(wm->yclk);
2079 dram_channels.full = dfixed_const(wm->dram_channels * 4);
2097 sclk.full = dfixed_const(wm->sclk);
2123 disp_clk.full = dfixed_const(wm->disp_clk);
2129 sclk.full = dfixed_const(wm->sclk);
2169 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
2171 bpp.full = dfixed_const(wm->bytes_per_pixel);
2172 src_width.full = dfixed_const(wm->src_width);
2174 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
2187 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
2188 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
2189 (wm->num_heads * cursor_line_pair_return_time);
2195 if (wm->num_heads == 0)
2200 if ((wm->vsc.full > a.full) ||
2201 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
2202 (wm->vtaps >= 5) ||
2203 ((wm->vsc.full >= a.full) && wm->interlaced))
2209 b.full = dfixed_const(wm->num_heads);
2211 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
2214 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
2216 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
2223 if (line_fill_time < wm->active_time)
2226 return latency + (line_fill_time - wm->active_time);
2233 (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
2242 (dce6_available_bandwidth(wm) / wm->num_heads))
2250 u32 lb_partitions = wm->lb_size / wm->src_width;
2251 u32 line_time = wm->active_time + wm->blank_time;
2257 if (wm->vsc.full > a.full)
2260 if (lb_partitions <= (wm->vtaps + 1))
2266 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
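
All of the watermark math in this stretch runs in the driver's 20.12 fixed-point format (dfixed_const/dfixed_mul/dfixed_div), since the kernel avoids floating point. A standalone sketch of that format, applied to the raw-DRAM-bandwidth formula used here (channels * 4 bytes per clock at ~0.7 efficiency); this sketch takes yclk in MHz so the value fits the 20 integer bits, whereas the driver feeds kHz and divides by 1000 in fixed point:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;	/* 20 integer bits, 12 fraction bits */

static fixed20_12 dfix_const(uint32_t v) { return (fixed20_12){ v << 12 }; }
static fixed20_12 dfix_mul(fixed20_12 a, fixed20_12 b)
{ return (fixed20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) }; }
static fixed20_12 dfix_div(fixed20_12 a, fixed20_12 b)
{ return (fixed20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) }; }
static uint32_t dfix_trunc(fixed20_12 a) { return a.full >> 12; }

/* Raw DRAM bandwidth in MB/s: yclk_mhz * (channels * 4 bytes) * 0.7 */
static uint32_t dram_bandwidth_mbs(uint32_t yclk_mhz, uint32_t channels)
{
	fixed20_12 eff = dfix_div(dfix_const(7), dfix_const(10));	/* 0.7 */
	fixed20_12 bw  = dfix_mul(dfix_const(yclk_mhz), dfix_const(channels * 4));

	return dfix_trunc(dfix_mul(bw, eff));
}

int main(void)
{
	/* 1250 MHz effective memory clock, 2 channels -> ~7000 MB/s */
	printf("%u MB/s\n", dram_bandwidth_mbs(1250, 2));
	return 0;
}
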
2278 struct drm_display_mode *mode = &radeon_crtc->base.mode;
2290 if (radeon_crtc->base.enabled && num_heads && mode) {
2291 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
2292 (u32)mode->clock);
2293 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
2294 (u32)mode->clock);
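
The two div_u64() calls convert the mode geometry into nanoseconds: mode->clock is the pixel clock in kHz (1000 pixels/s units), so hdisplay * 1000000 / clock is the time spent scanning out the visible part of one line. A quick worked check (the variable names are just locals for this example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1920x1080@60: pixel clock 148500 kHz, htotal 2200, hdisplay 1920 */
	uint32_t clock_khz = 148500, hdisplay = 1920, htotal = 2200;

	uint32_t active_ns = (uint32_t)((uint64_t)hdisplay * 1000000 / clock_khz);
	uint32_t line_ns   = (uint32_t)((uint64_t)htotal   * 1000000 / clock_khz);

	/* px / (1000 px/s) * 1e6 -> ns: prints roughly 12929, 14814, 1885 */
	printf("active %u ns, line %u ns, blank %u ns\n",
	       active_ns, line_ns, line_ns - active_ns);
	return 0;
}
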
2299 if (rdev->family == CHIP_ARUBA)
2305 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2311 wm_high.yclk = rdev->pm.current_mclk * 10;
2312 wm_high.sclk = rdev->pm.current_sclk * 10;
2315 wm_high.disp_clk = mode->clock;
2316 wm_high.src_width = mode->crtc_hdisplay;
2318 wm_high.blank_time = line_time - wm_high.active_time;
2320 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2322 wm_high.vsc = radeon_crtc->vsc;
2324 if (radeon_crtc->rmx_type != RMX_OFF)
2332 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2338 wm_low.yclk = rdev->pm.current_mclk * 10;
2339 wm_low.sclk = rdev->pm.current_sclk * 10;
2342 wm_low.disp_clk = mode->clock;
2343 wm_low.src_width = mode->crtc_hdisplay;
2345 wm_low.blank_time = line_time - wm_low.active_time;
2347 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2349 wm_low.vsc = radeon_crtc->vsc;
2351 if (radeon_crtc->rmx_type != RMX_OFF)
2368 (rdev->disp_priority == 2)) {
2376 (rdev->disp_priority == 2)) {
2383 b.full = dfixed_const(mode->clock);
2387 c.full = dfixed_mul(c, radeon_crtc->hsc);
2395 b.full = dfixed_const(mode->clock);
2399 c.full = dfixed_mul(c, radeon_crtc->hsc);
2407 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
2411 arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
2415 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
2416 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
2420 tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
2423 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
2424 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
2428 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
2431 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
2432 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
2435 radeon_crtc->line_time = line_time;
2436 radeon_crtc->wm_high = latency_watermark_a;
2437 radeon_crtc->wm_low = latency_watermark_b;
2447 if (!rdev->mode_info.mode_config_initialized)
2452 for (i = 0; i < rdev->num_crtc; i++) {
2453 if (rdev->mode_info.crtcs[i]->base.enabled)
2456 for (i = 0; i < rdev->num_crtc; i += 2) {
2457 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2458 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2459 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2460 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2461 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2462 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2471 u32 *tile = rdev->config.si.tile_mode_array;
2473 ARRAY_SIZE(rdev->config.si.tile_mode_array);
2476 switch (rdev->config.si.mem_row_size_in_kb) {
2492 switch(rdev->family) {
2495 /* non-AA compressed depth or any compressed stencil */
2531 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2540 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2549 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2710 /* non-AA compressed depth or any compressed stencil */
2746 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2755 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2764 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2923 DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
3044 rdev->config.si.backend_enable_mask = enabled_rbs;
3078 switch (rdev->family) {
3080 rdev->config.si.max_shader_engines = 2;
3081 rdev->config.si.max_tile_pipes = 12;
3082 rdev->config.si.max_cu_per_sh = 8;
3083 rdev->config.si.max_sh_per_se = 2;
3084 rdev->config.si.max_backends_per_se = 4;
3085 rdev->config.si.max_texture_channel_caches = 12;
3086 rdev->config.si.max_gprs = 256;
3087 rdev->config.si.max_gs_threads = 32;
3088 rdev->config.si.max_hw_contexts = 8;
3090 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3091 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3092 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3093 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3097 rdev->config.si.max_shader_engines = 2;
3098 rdev->config.si.max_tile_pipes = 8;
3099 rdev->config.si.max_cu_per_sh = 5;
3100 rdev->config.si.max_sh_per_se = 2;
3101 rdev->config.si.max_backends_per_se = 4;
3102 rdev->config.si.max_texture_channel_caches = 8;
3103 rdev->config.si.max_gprs = 256;
3104 rdev->config.si.max_gs_threads = 32;
3105 rdev->config.si.max_hw_contexts = 8;
3107 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3108 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3109 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3110 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3115 rdev->config.si.max_shader_engines = 1;
3116 rdev->config.si.max_tile_pipes = 4;
3117 rdev->config.si.max_cu_per_sh = 5;
3118 rdev->config.si.max_sh_per_se = 2;
3119 rdev->config.si.max_backends_per_se = 4;
3120 rdev->config.si.max_texture_channel_caches = 4;
3121 rdev->config.si.max_gprs = 256;
3122 rdev->config.si.max_gs_threads = 32;
3123 rdev->config.si.max_hw_contexts = 8;
3125 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3126 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3127 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3128 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3132 rdev->config.si.max_shader_engines = 1;
3133 rdev->config.si.max_tile_pipes = 4;
3134 rdev->config.si.max_cu_per_sh = 6;
3135 rdev->config.si.max_sh_per_se = 1;
3136 rdev->config.si.max_backends_per_se = 2;
3137 rdev->config.si.max_texture_channel_caches = 4;
3138 rdev->config.si.max_gprs = 256;
3139 rdev->config.si.max_gs_threads = 16;
3140 rdev->config.si.max_hw_contexts = 8;
3142 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3143 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3144 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3145 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3149 rdev->config.si.max_shader_engines = 1;
3150 rdev->config.si.max_tile_pipes = 4;
3151 rdev->config.si.max_cu_per_sh = 5;
3152 rdev->config.si.max_sh_per_se = 1;
3153 rdev->config.si.max_backends_per_se = 1;
3154 rdev->config.si.max_texture_channel_caches = 2;
3155 rdev->config.si.max_gprs = 256;
3156 rdev->config.si.max_gs_threads = 16;
3157 rdev->config.si.max_hw_contexts = 8;
3159 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3160 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3161 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3162 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3187 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
3188 rdev->config.si.mem_max_burst_length_bytes = 256;
3190 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
3191 if (rdev->config.si.mem_row_size_in_kb > 4)
3192 rdev->config.si.mem_row_size_in_kb = 4;
3194 rdev->config.si.shader_engine_tile_size = 32;
3195 rdev->config.si.num_gpus = 1;
3196 rdev->config.si.multi_gpu_tile_size = 64;
3200 switch (rdev->config.si.mem_row_size_in_kb) {
3220 rdev->config.si.tile_config = 0;
3221 switch (rdev->config.si.num_tile_pipes) {
3223 rdev->config.si.tile_config |= (0 << 0);
3226 rdev->config.si.tile_config |= (1 << 0);
3229 rdev->config.si.tile_config |= (2 << 0);
3234 rdev->config.si.tile_config |= (3 << 0);
3239 rdev->config.si.tile_config |= 0 << 4;
3242 rdev->config.si.tile_config |= 1 << 4;
3246 rdev->config.si.tile_config |= 2 << 4;
3249 rdev->config.si.tile_config |=
3251 rdev->config.si.tile_config |=
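
The tile_config word assembled above packs four fields that userspace later decodes. A sketch of the packing, where the field widths follow the shift amounts visible above and the pipe/bank encodings follow the driver's usual mapping (the two GB_ADDR_CONFIG-derived fields are copied through unmodified):

#include <stdint.h>

/* bits [3:0] pipes (0=1, 1=2, 2=4, 3=8); bits [7:4] banks (0=4, 1=8, 2=16);
 * bits [11:8] pipe interleave; bits [15:12] row size. */
static uint32_t pack_tile_config(uint32_t pipes, uint32_t banks,
				 uint32_t pipe_interleave, uint32_t row_size)
{
	uint32_t cfg = 0;

	switch (pipes) {
	case 1: cfg |= 0 << 0; break;
	case 2: cfg |= 1 << 0; break;
	case 4: cfg |= 2 << 0; break;
	case 8: default: cfg |= 3 << 0; break;
	}
	switch (banks) {
	case 4:  cfg |= 0 << 4; break;
	case 8:  cfg |= 1 << 4; break;
	case 16: default: cfg |= 2 << 4; break;
	}
	cfg |= pipe_interleave << 8;
	cfg |= row_size << 12;
	return cfg;
}
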
3260 if (rdev->has_uvd) {
3268 si_setup_rb(rdev, rdev->config.si.max_shader_engines,
3269 rdev->config.si.max_sh_per_se,
3270 rdev->config.si.max_backends_per_se);
3272 si_setup_spi(rdev, rdev->config.si.max_shader_engines,
3273 rdev->config.si.max_sh_per_se,
3274 rdev->config.si.max_cu_per_sh);
3276 rdev->config.si.active_cus = 0;
3277 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
3278 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
3279 rdev->config.si.active_cus +=
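
active_cus is accumulated per shader engine / shader array by counting the set bits in each array's CU-enable bitmap. The counting itself is just a population count; a sketch (the bitmaps and helper name are illustrative):

#include <stdint.h>

/* Count active compute units across SE/SH pairs from their enable bitmaps. */
static unsigned count_active_cus(const uint32_t *cu_bitmaps,
				 unsigned num_se, unsigned num_sh)
{
	unsigned total = 0;

	for (unsigned i = 0; i < num_se * num_sh; i++)
		total += (unsigned)__builtin_popcount(cu_bitmaps[i]); /* one bit per enabled CU */
	return total;
}
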
3294 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
3295 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
3296 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
3297 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
3342 rdev->scratch.num_reg = 7;
3343 rdev->scratch.reg_base = SCRATCH_REG0;
3344 for (i = 0; i < rdev->scratch.num_reg; i++) {
3345 rdev->scratch.free[i] = true;
3346 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3353 struct radeon_ring *ring = &rdev->ring[fence->ring];
3354 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3358 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
3368 /* EVENT_WRITE_EOP - flush caches, send int */
3373 radeon_ring_write(ring, fence->seq);
3382 struct radeon_ring *ring = &rdev->ring[ib->ring];
3383 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
3386 if (ib->is_const_ib) {
3394 if (ring->rptr_save_reg) {
3395 next_rptr = ring->wptr + 3 + 4 + 8;
3397 radeon_ring_write(ring, ((ring->rptr_save_reg -
3400 } else if (rdev->wb.enabled) {
3401 next_rptr = ring->wptr + 5 + 4 + 8;
3404 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3405 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
3417 (ib->gpu_addr & 0xFFFFFFFC));
3418 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3419 radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
3421 if (!ib->is_const_ib) {
3424 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
3445 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3446 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3449 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3450 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3451 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3460 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3461 return -EINVAL;
3465 if (rdev->new_fw) {
3467 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3469 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3471 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3475 radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
3476 radeon_ucode_print_gfx_hdr(&ce_hdr->header);
3477 radeon_ucode_print_gfx_hdr(&me_hdr->header);
3481 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3482 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3490 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3491 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3499 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3500 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3509 fw_data = (const __be32 *)rdev->pfp_fw->data;
3516 fw_data = (const __be32 *)rdev->ce_fw->data;
3523 fw_data = (const __be32 *)rdev->me_fw->data;
3539 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3551 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3593 ring = &rdev->ring[i];
3615 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3617 radeon_scratch_free(rdev, ring->rptr_save_reg);
3619 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3621 radeon_scratch_free(rdev, ring->rptr_save_reg);
3623 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3625 radeon_scratch_free(rdev, ring->rptr_save_reg);
3644 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
3646 /* ring 0 - compute and gfx */
3648 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3649 rb_bufsz = order_base_2(ring->ring_size / 8);
3658 ring->wptr = 0;
3659 WREG32(CP_RB0_WPTR, ring->wptr);
3662 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
3663 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
3665 if (rdev->wb.enabled)
3675 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
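
rb_bufsz feeds the ring-buffer size field, which wants a power-of-two order rather than a byte count. order_base_2(n) is ceil(log2(n)); a portable sketch of the same computation:

#include <stdint.h>

/* order_base_2(n): smallest k such that (1u << k) >= n, i.e. ceil(log2(n)). */
static unsigned order_base_2_sketch(uint32_t n)
{
	unsigned k = 0;

	while ((1u << k) < n)
		k++;
	return k;
}
/* e.g. a 1 MiB ring: order_base_2_sketch((1024 * 1024) / 8) == 17 */
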
3677 /* ring1 - compute only */
3679 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3680 rb_bufsz = order_base_2(ring->ring_size / 8);
3689 ring->wptr = 0;
3690 WREG32(CP_RB1_WPTR, ring->wptr);
3693 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
3694 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
3699 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
3701 /* ring2 - compute only */
3703 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3704 rb_bufsz = order_base_2(ring->ring_size / 8);
3713 ring->wptr = 0;
3714 WREG32(CP_RB2_WPTR, ring->wptr);
3717 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
3718 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
3723 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
3727 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
3728 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
3729 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
3730 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
3732 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3733 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3734 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3737 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
3739 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3741 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
3743 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3748 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3749 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
3844 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
3847 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3849 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3879 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
3933 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3947 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3979 for (i = 0; i < rdev->usec_timeout; i++) {
4020 dev_info(rdev->dev, "GPU pci config reset\n");
4048 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4056 pci_clear_master(rdev->pdev);
4060 for (i = 0; i < rdev->usec_timeout; i++) {
4099 * si_gfx_is_lockup - Check if the GFX engine is locked up
4139 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4146 rdev->mc.vram_start >> 12);
4148 rdev->mc.vram_end >> 12);
4150 rdev->vram_scratch.gpu_addr >> 12);
4151 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
4152 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
4155 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
4162 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4175 if (mc->mc_vram_size > 0xFFC0000000ULL) {
4177 dev_warn(rdev->dev, "limiting VRAM\n");
4178 mc->real_vram_size = 0xFFC0000000ULL;
4179 mc->mc_vram_size = 0xFFC0000000ULL;
4181 radeon_vram_location(rdev, &rdev->mc, 0);
4182 rdev->mc.gtt_base_align = 0;
4192 rdev->mc.vram_is_ddr = true;
4232 rdev->mc.vram_width = numchan * chansize;
4234 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4235 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4244 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
4245 rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
4246 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4247 si_vram_gtt_location(rdev, &rdev->mc);
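
CONFIG_MEMSIZE reports VRAM in MiB (the tmp above), so the driver widens to 64-bit before scaling to bytes; boards with 4 GiB or more would overflow 32-bit math. In short:

#include <stdint.h>

/* CONFIG_MEMSIZE is in MiB; mc_vram_size wants bytes. */
static uint64_t memsize_mb_to_bytes(uint32_t mb)
{
	return (uint64_t)mb * 1024ULL * 1024ULL;	/* 64-bit: >4 GiB boards exist */
}
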
4261 /* bits 0-15 are the VM contexts 0-15 */
4269 if (rdev->gart.robj == NULL) {
4270 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
4271 return -EINVAL;
4296 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
4297 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
4298 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
4300 (u32)(rdev->dummy_page.addr >> 12));
4309 /* empty contexts 1-15 */
4310 /* set vm size, must be a multiple of 4 */
4312 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
4315 * on the fly in the vm part of radeon_gart.c
4320 rdev->vm_manager.saved_table_addr[i]);
4322 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
4323 rdev->vm_manager.saved_table_addr[i]);
4326 /* enable contexts 1-15 */
4328 (u32)(rdev->dummy_page.addr >> 12));
4331 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
4347 (unsigned)(rdev->mc.gtt_size >> 20),
4348 (unsigned long long)rdev->gart.table_addr);
4349 rdev->gart.ready = true;
4362 reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
4363 rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
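
The save loop here and the restore loop in the gart-enable path above both split the sixteen page-table base registers into two banks: contexts 0-7 sit at one register base and contexts 8-15 at another, each with a 4-byte stride, hence the `(i - 8) << 2`. The addressing rule as a sketch (the base offsets below are placeholders, not the real register addresses):

#include <stdint.h>

#define VM_CONTEXT0_PT_BASE 0x1400u	/* placeholder offsets */
#define VM_CONTEXT8_PT_BASE 0x1500u

/* Byte offset of the page-table base register for VM context id 0..15. */
static uint32_t vm_pt_base_reg(unsigned id)
{
	if (id < 8)
		return VM_CONTEXT0_PT_BASE + (id << 2);		/* contexts 0-7 */
	return VM_CONTEXT8_PT_BASE + ((id - 8) << 2);		/* contexts 8-15 */
}
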
4390 /* vm parser */
4440 switch (pkt->opcode) {
4453 DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
4454 return -EINVAL;
4473 return -EINVAL;
4480 return -EINVAL;
4494 return -EINVAL;
4501 return -EINVAL;
4514 u32 idx = pkt->idx + 1;
4518 switch (pkt->opcode) {
4569 return -EINVAL;
4577 return -EINVAL;
4579 for (i = 0; i < (pkt->count - 2); i++) {
4582 return -EINVAL;
4591 return -EINVAL;
4598 return -EINVAL;
4603 end_reg = 4 * pkt->count + start_reg - 4;
4608 return -EINVAL;
4610 for (i = 0; i < pkt->count; i++) {
4613 return -EINVAL;
4622 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
4623 return -EINVAL;
4632 u32 idx = pkt->idx + 1;
4636 switch (pkt->opcode) {
4672 return -EINVAL;
4680 return -EINVAL;
4682 for (i = 0; i < (pkt->count - 2); i++) {
4685 return -EINVAL;
4694 return -EINVAL;
4701 return -EINVAL;
4710 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
4711 return -EINVAL;
4724 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
4725 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
4729 dev_err(rdev->dev, "Packet0 not allowed!\n");
4730 ret = -EINVAL;
4736 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
4737 if (ib->is_const_ib)
4738 ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
4740 switch (ib->ring) {
4742 ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
4746 ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
4749 dev_err(rdev->dev, "Non-PM4 ring %d!\n", ib->ring);
4750 ret = -EINVAL;
4757 dev_err(rdev->dev, "Unknown packet type %d!\n", pkt.type);
4758 ret = -EINVAL;
4762 for (i = 0; i < ib->length_dw; i++) {
4764 printk("\t0x%08x <---\n", ib->ptr[i]);
4766 printk("\t0x%08x\n", ib->ptr[i]);
4770 } while (idx < ib->length_dw);
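
The parser above walks the IB packet by packet: the header dword encodes the packet type in bits [31:30], a count in bits [29:16] (payload dwords minus one), and, for type-3 packets, an opcode in bits [15:8]; the cursor then advances by count + 2. A standalone sketch of that walk:

#include <stdint.h>
#include <stdio.h>

#define PKT_TYPE(h)	(((h) >> 30) & 0x3)
#define PKT_COUNT(h)	(((h) >> 16) & 0x3fff)	/* payload dwords - 1 */
#define PKT3_OPCODE(h)	(((h) >> 8) & 0xff)

/* Walk a PM4 command stream, printing type-3 opcodes; returns 0, or -1 on a
 * disallowed packet.  Mirrors the shape of the IB parser above. */
static int walk_pm4(const uint32_t *ib, uint32_t len_dw)
{
	uint32_t idx = 0;

	while (idx < len_dw) {
		uint32_t hdr = ib[idx];

		switch (PKT_TYPE(hdr)) {
		case 0:				/* type-0: raw register writes */
			return -1;		/* not allowed in a VM IB */
		case 2:				/* type-2: one-dword filler */
			idx += 1;
			break;
		case 3:				/* type-3: opcode + payload */
			printf("PACKET3 opcode 0x%02x, %u payload dwords\n",
			       (unsigned)PKT3_OPCODE(hdr), PKT_COUNT(hdr) + 1);
			idx += PKT_COUNT(hdr) + 2;	/* header + count+1 dwords */
			break;
		default:
			return -1;		/* type-1 is reserved */
		}
	}
	return 0;
}
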
4776 * vm
4781 rdev->vm_manager.nvm = 16;
4783 rdev->vm_manager.vram_base_offset = 0;
4793 * si_vm_decode_fault - print human-readable fault info
4809 if (rdev->family == CHIP_TAHITI) {
5050 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
5069 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
5082 /* bits 0-15 are the VM contexts 0-15 */
5112 for (i = 0; i < rdev->usec_timeout; i++) {
5118 for (i = 0; i < rdev->usec_timeout; i++) {
5143 for (i = 0; i < rdev->usec_timeout; i++) {
5215 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
5239 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
5263 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5269 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5294 for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
5308 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5309 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5313 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
5342 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
5380 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5386 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5436 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5484 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5501 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5516 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5554 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5572 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5588 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5627 if (rdev->has_uvd) {
5645 if (rdev->has_uvd) {
5653 if (rdev->has_uvd) {
5669 if (rdev->rlc.cs_data == NULL)
5677 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5678 for (ext = sect->section; ext->extent != NULL; ++ext) {
5679 if (sect->id == SECT_CONTEXT)
5680 count += 2 + ext->reg_count;
5701 if (rdev->rlc.cs_data == NULL)
5713 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5714 for (ext = sect->section; ext->extent != NULL; ++ext) {
5715 if (sect->id == SECT_CONTEXT) {
5717 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
5718 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
5719 for (i = 0; i < ext->reg_count; i++)
5720 buffer[count++] = cpu_to_le32(ext->extent[i]);
5728 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
5729 switch (rdev->family) {
5757 if (rdev->pg_flags) {
5758 if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
5762 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5765 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5766 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5771 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5772 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5778 if (rdev->pg_flags) {
5849 if (!rdev->rlc_fw)
5850 return -EINVAL;
5870 if (rdev->new_fw) {
5872 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
5873 u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
5875 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
5877 radeon_ucode_print_rlc_hdr(&hdr->header);
5885 (const __be32 *)rdev->rlc_fw->data;
5909 rdev->ih.enabled = true;
5924 rdev->ih.enabled = false;
5925 rdev->ih.rptr = 0;
5944 for (i = 0; i < rdev->num_crtc; i++)
5946 for (i = 0; i < rdev->num_crtc; i++)
5981 WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
5983 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
5984 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
5987 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
5991 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
5992 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
5998 if (rdev->wb.enabled)
6002 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
6003 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
6014 if (rdev->msi_enabled)
6018 /* force the active interrupt state to all disabled */
6021 pci_set_master(rdev->pdev);
6039 if (!rdev->irq.installed) {
6041 return -EINVAL;
6044 if (!rdev->ih.enabled) {
6046 /* force the active interrupt state to all disabled */
6061 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
6065 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
6069 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
6073 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
6078 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
6092 if (rdev->irq.dpm_thermal) {
6097 for (i = 0; i < rdev->num_crtc; i++) {
6100 rdev->irq.crtc_vblank_int[i] ||
6101 atomic_read(&rdev->irq.pflip[i]), "vblank", i);
6104 for (i = 0; i < rdev->num_crtc; i++)
6112 rdev->irq.hpd[i], "HPD", i);
6128 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
6129 u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
6136 if (i < rdev->num_crtc)
6141 for (i = 0; i < rdev->num_crtc; i += 2) {
6194 if (rdev->wb.enabled)
6195 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
6205 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
6206 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
6207 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
6212 return (wptr & rdev->ih.ptr_mask);
6217 * [7:0] - interrupt source id
6218 * [31:8] - reserved
6219 * [59:32] - interrupt source data
6220 * [63:60] - reserved
6221 * [71:64] - RINGID
6222 * [79:72] - VMID
6223 * [127:80] - reserved
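
Each IH ring entry is 128 bits with the layout above; the handler pulls src_id, src_data and ring_id out of the first three dwords with exactly these widths. Decoding one entry in isolation:

#include <stdint.h>

struct ih_entry { uint8_t src_id; uint32_t src_data; uint8_t ring_id; };

/* Decode one 4-dword (128-bit) IH ring entry per the layout above. */
static struct ih_entry ih_decode(const uint32_t dw[4])
{
	struct ih_entry e;

	e.src_id   = dw[0] & 0xff;	/* [7:0]   interrupt source id   */
	e.src_data = dw[1] & 0xfffffff;	/* [59:32] source data (28 bits) */
	e.ring_id  = dw[2] & 0xff;	/* [71:64] RINGID                */
	return e;
}
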
6227 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
6240 if (!rdev->ih.enabled || rdev->shutdown)
6247 if (atomic_xchg(&rdev->ih.lock, 1))
6250 rptr = rdev->ih.rptr;
6262 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
6263 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
6264 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
6273 crtc_idx = src_id - 1;
6279 if (rdev->irq.crtc_vblank_int[crtc_idx]) {
6281 rdev->pm.vblank_sync = true;
6282 wake_up(&rdev->irq.vblank_queue);
6284 if (atomic_read(&rdev->irq.pflip[crtc_idx])) {
6299 DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
6313 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
6315 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
6325 hpd_idx = src_data - 6;
6358 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6359 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
6361 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
6394 rdev->pm.dpm.thermal.high_to_low = false;
6399 rdev->pm.dpm.thermal.high_to_low = true;
6416 rptr &= rdev->ih.ptr_mask;
6420 schedule_work(&rdev->dp_work);
6422 schedule_delayed_work(&rdev->hotplug_work, 0);
6423 if (queue_thermal && rdev->pm.dpm_enabled)
6424 schedule_work(&rdev->pm.dpm.thermal.work);
6425 rdev->ih.rptr = rptr;
6426 atomic_set(&rdev->ih.lock, 0);
6443 if (!rdev->has_uvd)
6448 dev_err(rdev->dev, "failed UVD init (%d).\n", r);
6450 * At this point rdev->uvd.vcpu_bo is NULL which trickles down
6455 rdev->has_uvd = false;
6458 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
6459 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
6466 if (!rdev->has_uvd)
6471 dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
6476 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
6482 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
6490 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
6493 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6494 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
6496 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
6501 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
6510 if (!rdev->has_vce)
6515 dev_err(rdev->dev, "failed VCE init (%d).\n", r);
6517 * At this point rdev->vce.vcpu_bo is NULL which trickles down
6522 rdev->has_vce = false;
6525 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
6526 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
6527 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
6528 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
6535 if (!rdev->has_vce)
6540 dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6545 dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6550 dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
6555 dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
6561 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
6562 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
6570 if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
6573 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
6574 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
6576 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
6579 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
6580 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
6582 dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
6587 dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
6609 if (!rdev->pm.dpm_enabled) {
6623 if (rdev->family == CHIP_VERDE) {
6624 rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
6625 rdev->rlc.reg_list_size =
6628 rdev->rlc.cs_data = si_cs_data;
6642 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
6648 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
6654 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
6660 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
6666 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
6674 if (!rdev->irq.installed) {
6688 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6689 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
6694 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6695 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
6700 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6701 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
6706 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
6707 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
6712 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
6713 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
6734 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
6740 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
6760 atom_asic_init(rdev->mode_info.atom_context);
6765 if (rdev->pm.pm_method == PM_METHOD_DPM)
6768 rdev->accel_working = true;
6772 rdev->accel_working = false;
6787 if (rdev->has_uvd) {
6791 if (rdev->has_vce)
6809 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6815 return -EINVAL;
6818 if (!rdev->is_atom_bios) {
6819 dev_err(rdev->dev, "Expecting atombios for SI GPU\n");
6820 return -EINVAL;
6828 if (!rdev->bios) {
6829 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
6830 return -EINVAL;
6833 atom_asic_init(rdev->mode_info.atom_context);
6856 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
6857 !rdev->rlc_fw || !rdev->mc_fw) {
6868 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
6869 ring->ring_obj = NULL;
6872 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6873 ring->ring_obj = NULL;
6876 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6877 ring->ring_obj = NULL;
6880 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
6881 ring->ring_obj = NULL;
6884 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
6885 ring->ring_obj = NULL;
6891 rdev->ih.ring_obj = NULL;
6898 rdev->accel_working = true;
6901 dev_err(rdev->dev, "disabling GPU acceleration\n");
6911 rdev->accel_working = false;
6918 if (!rdev->mc_fw) {
6920 return -EINVAL;
6939 if (rdev->has_uvd) {
6943 if (rdev->has_vce)
6951 kfree(rdev->bios);
6952 rdev->bios = NULL;
6956 * si_get_gpu_clock_counter - return GPU clock counter snapshot
6967 mutex_lock(&rdev->gpu_clock_mutex);
6971 mutex_unlock(&rdev->gpu_clock_mutex);
7066 struct pci_dev *root = rdev->pdev->bus->self;
7072 if (pci_is_root_bus(rdev->pdev->bus))
7078 if (rdev->flags & RADEON_IS_IGP)
7081 if (!(rdev->flags & RADEON_IS_PCIE))
7109 if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
7113 /* re-try equalization if gen3 is not already enabled */
7120 pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
7138 pcie_capability_read_word(rdev->pdev,
7146 pcie_capability_read_word(rdev->pdev,
7152 pcie_capability_read_word(rdev->pdev,
7171 pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
7183 pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL2,
7209 pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL2,
7216 for (i = 0; i < rdev->usec_timeout; i++) {
7233 if (!(rdev->flags & RADEON_IS_PCIE))
7291 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
7340 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7347 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7353 !pci_is_root_bus(rdev->pdev->bus)) {
7354 struct pci_dev *root = rdev->pdev->bus->self;
7454 return -ETIMEDOUT;