Lines matching defs:adev (drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c)
58 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
59 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
60 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
882 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
883 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
884 static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
885 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
887 static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
889 amdgpu_ucode_release(&adev->gfx.pfp_fw);
890 amdgpu_ucode_release(&adev->gfx.me_fw);
891 amdgpu_ucode_release(&adev->gfx.ce_fw);
892 amdgpu_ucode_release(&adev->gfx.mec_fw);
893 amdgpu_ucode_release(&adev->gfx.mec2_fw);
894 amdgpu_ucode_release(&adev->gfx.rlc_fw);
903 * @adev: amdgpu_device pointer
909 static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
916 switch (adev->asic_type) {
936 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
942 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
948 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
954 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
960 if (adev->asic_type == CHIP_KAVERI) {
961 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
968 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
974 gfx_v7_0_free_microcode(adev);
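The amdgpu_ucode_request()/amdgpu_ucode_release() calls above come in matched pairs: on any load failure the driver unwinds every image already loaded. A minimal sketch of that idiom, assuming the printf-style request variant and the conventional "amdgpu/%s_pfp.bin" naming (both vary across kernel versions and are not spelled out in this listing):

static int example_request_pfp(struct amdgpu_device *adev, const char *chip_name)
{
	int err;

	/* Load one image; the exact request signature differs between kernels. */
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
				   "amdgpu/%s_pfp.bin", chip_name);
	if (err)
		/* Drop everything loaded so far; release is NULL-safe. */
		gfx_v7_0_free_microcode(adev);
	return err;
}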
982 * @adev: amdgpu_device pointer
990 static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
993 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
995 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
999 tile = adev->gfx.config.tile_mode_array;
1000 macrotile = adev->gfx.config.macrotile_mode_array;
1002 switch (adev->gfx.config.mem_row_size_in_kb) {
1020 switch (adev->asic_type) {
1547 * @adev: amdgpu_device pointer
1555 static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
1584 * @adev: amdgpu_device pointer
1589 static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1599 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1600 adev->gfx.config.max_sh_per_se);
1606 gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1608 switch (adev->asic_type) {
1632 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1638 gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1642 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1643 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1737 gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
1743 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1749 * @adev: amdgpu_device pointer
1753 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1759 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1760 adev->gfx.config.max_sh_per_se;
1763 mutex_lock(&adev->grbm_idx_mutex);
1764 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1765 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1766 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1767 data = gfx_v7_0_get_rb_active_bitmap(adev);
1768 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1772 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1774 adev->gfx.config.backend_enable_mask = active_rbs;
1775 adev->gfx.config.num_rbs = hweight32(active_rbs);
1777 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1778 adev->gfx.config.max_shader_engines, 16);
1780 gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1782 if (!adev->gfx.config.backend_enable_mask ||
1783 adev->gfx.config.num_rbs >= num_rb_pipes) {
1787 gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1788 adev->gfx.config.backend_enable_mask,
1793 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1794 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1795 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1796 adev->gfx.config.rb_config[i][j].rb_backend_disable =
1798 adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
1800 adev->gfx.config.rb_config[i][j].raster_config =
1802 adev->gfx.config.rb_config[i][j].raster_config_1 =
1806 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1807 mutex_unlock(&adev->grbm_idx_mutex);
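gfx_v7_0_setup_rb() above, like gfx_v7_0_wait_for_rlc_serdes() and gfx_v7_0_get_cu_info() further down, follows the driver's banked-register idiom: all per-SE/SH register access happens under grbm_idx_mutex, and the index is restored to broadcast before the lock is dropped. A sketch of the pattern, using only calls that appear in this listing:

static void example_walk_shader_arrays(struct amdgpu_device *adev)
{
	u32 i, j;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			/* Point GRBM at one SE/SH instance... */
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			/* ...read or program per-instance registers here. */
		}
	}
	/* Always restore broadcast mode before dropping the lock. */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
}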
1814 * @adev: amdgpu_device pointer
1819 static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
1835 mutex_lock(&adev->srbm_mutex);
1836 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1837 cik_srbm_select(adev, 0, 0, 0, i);
1844 cik_srbm_select(adev, 0, 0, 0, 0);
1845 mutex_unlock(&adev->srbm_mutex);
1849 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
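gfx_v7_0_init_compute_vmid() and gfx_v7_0_constants_init() use the SRBM counterpart of that idiom for per-VMID state: select a VMID under srbm_mutex, program its banked registers, then return to VMID 0. A sketch restricted to calls shown above (the per-VMID register writes are elided):

static void example_program_kfd_vmids(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		/* Bank register accesses to VMID i... */
		cik_srbm_select(adev, 0, 0, 0, i);
		/* ...program the per-VMID SH_MEM registers here. */
	}
	cik_srbm_select(adev, 0, 0, 0, 0);	/* back to VMID 0 */
	mutex_unlock(&adev->srbm_mutex);
}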
1857 static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev)
1875 static void gfx_v7_0_config_init(struct amdgpu_device *adev)
1877 adev->gfx.config.double_offchip_lds_buf = 1;
1883 * @adev: amdgpu_device pointer
1888 static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
1896 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1897 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1898 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1900 gfx_v7_0_tiling_mode_table_init(adev);
1902 gfx_v7_0_setup_rb(adev);
1903 gfx_v7_0_get_cu_info(adev);
1904 gfx_v7_0_config_init(adev);
1911 mutex_lock(&adev->grbm_idx_mutex);
1916 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1936 mutex_lock(&adev->srbm_mutex);
1937 for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
1941 sh_mem_base = adev->gmc.shared_aperture_start >> 48;
1942 cik_srbm_select(adev, 0, 0, 0, i);
1949 cik_srbm_select(adev, 0, 0, 0, 0);
1950 mutex_unlock(&adev->srbm_mutex);
1952 gfx_v7_0_init_compute_vmid(adev);
1953 gfx_v7_0_init_gds_vmid(adev);
1982 ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1983 (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1984 (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1985 (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
2015 mutex_unlock(&adev->grbm_idx_mutex);
2032 struct amdgpu_device *adev = ring->adev;
2047 for (i = 0; i < adev->usec_timeout; i++) {
2053 if (i >= adev->usec_timeout)
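The adev->usec_timeout comparisons above (and in gfx_v7_0_mqd_deactivate(), gfx_v7_0_halt_rlc() and the safe-mode helpers below) are instances of the driver's standard register-poll loop. A sketch of the idiom, where mmEXAMPLE_STATUS and the expected value are hypothetical stand-ins:

static int example_poll_status(struct amdgpu_device *adev, u32 expected)
{
	unsigned i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmEXAMPLE_STATUS) == expected)	/* hypothetical register */
			return 0;
		udelay(1);
	}
	/* Loop exhausted: i reached adev->usec_timeout. */
	return -ETIMEDOUT;
}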
2253 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2298 struct amdgpu_device *adev = ring->adev;
2306 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
2364 * @adev: amdgpu_device pointer
2369 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2383 * @adev: amdgpu_device pointer
2388 static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2396 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2399 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2400 ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2401 me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2406 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2407 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2408 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2409 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2410 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2411 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2413 gfx_v7_0_cp_gfx_enable(adev, false);
2417 (adev->gfx.pfp_fw->data +
2423 WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2427 (adev->gfx.ce_fw->data +
2433 WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2437 (adev->gfx.me_fw->data +
2443 WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
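Between the header reads and the final *_UCODE_ADDR writes above, gfx_v7_0_cp_gfx_load_microcode() streams each image into the CP a dword at a time. A sketch of the PFP upload under that assumption; mmCP_PFP_UCODE_DATA is taken from the CIK register headers rather than this listing:

static void example_load_pfp_ucode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *hdr =
		(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	const __le32 *fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	u32 i, fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	gfx_v7_0_cp_gfx_enable(adev, false);	/* halt the CP before touching its ucode RAM */
	WREG32(mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
}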
2451 * @adev: amdgpu_device pointer
2457 static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2459 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2465 WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2469 gfx_v7_0_cp_gfx_enable(adev, true);
2471 r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2491 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2505 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
2506 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
2527 * @adev: amdgpu_device pointer
2533 static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2542 if (adev->asic_type != CHIP_HAWAII)
2555 ring = &adev->gfx.gfx_ring[0];
2584 gfx_v7_0_cp_gfx_start(adev);
2599 struct amdgpu_device *adev = ring->adev;
2606 struct amdgpu_device *adev = ring->adev;
2620 struct amdgpu_device *adev = ring->adev;
2630 * @adev: amdgpu_device pointer
2635 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2648 * @adev: amdgpu_device pointer
2653 static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2659 if (!adev->gfx.mec_fw)
2662 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2664 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2665 adev->gfx.mec_feature_version = le32_to_cpu(
2668 gfx_v7_0_cp_compute_enable(adev, false);
2672 (adev->gfx.mec_fw->data +
2680 if (adev->asic_type == CHIP_KAVERI) {
2683 if (!adev->gfx.mec2_fw)
2686 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2688 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2689 adev->gfx.mec2_feature_version = le32_to_cpu(
2694 (adev->gfx.mec2_fw->data +
2709 * @adev: amdgpu_device pointer
2714 static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2718 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2719 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2725 static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2727 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
2730 static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2736 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2739 amdgpu_gfx_compute_queue_acquire(adev);
2742 mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
2745 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
2748 &adev->gfx.mec.hpd_eop_obj,
2749 &adev->gfx.mec.hpd_eop_gpu_addr,
2752 dev_warn(adev->dev, "(%d) create, pin or map of HPD EOP bo failed\n", r);
2753 gfx_v7_0_mec_fini(adev);
2760 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2761 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
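gfx_v7_0_mec_init() above uses the create-reserved/kunmap/unreserve lifecycle for the HPD EOP buffer: one call creates, reserves, pins and CPU-maps the BO, and the mapping is dropped once the CPU is done with it. A sketch of that lifecycle; the GTT domain choice is an assumption, since the listing elides that argument:

static int example_alloc_hpd_eop(struct amdgpu_device *adev, u32 mec_hpd_size)
{
	u32 *hpd;
	int r;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,	/* assumed domain */
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		return r;
	}

	memset(hpd, 0, mec_hpd_size);

	/* CPU writes done: drop the mapping and the reservation,
	 * keeping only hpd_eop_gpu_addr for the hardware. */
	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	return 0;
}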
2766 static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
2771 size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe)
2774 mutex_lock(&adev->srbm_mutex);
2775 eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
2777 cik_srbm_select(adev, mec + 1, pipe, 0, 0);
2792 cik_srbm_select(adev, 0, 0, 0, 0);
2793 mutex_unlock(&adev->srbm_mutex);
2796 static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
2803 for (i = 0; i < adev->usec_timeout; i++) {
2809 if (i == adev->usec_timeout)
2820 static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
2940 static int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
2965 static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
2970 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2972 r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
2976 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
2980 mutex_lock(&adev->srbm_mutex);
2981 cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2983 gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
2984 gfx_v7_0_mqd_deactivate(adev);
2985 gfx_v7_0_mqd_commit(adev, mqd);
2987 cik_srbm_select(adev, 0, 0, 0, 0);
2988 mutex_unlock(&adev->srbm_mutex);
2998 * @adev: amdgpu_device pointer
3004 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
3016 for (i = 0; i < adev->gfx.mec.num_mec; i++)
3017 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++)
3018 gfx_v7_0_compute_pipe_init(adev, i, j);
3021 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3022 r = gfx_v7_0_compute_queue_init(adev, i);
3024 gfx_v7_0_cp_compute_fini(adev);
3029 gfx_v7_0_cp_compute_enable(adev, true);
3031 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3032 ring = &adev->gfx.compute_ring[i];
3039 static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3041 gfx_v7_0_cp_gfx_enable(adev, enable);
3042 gfx_v7_0_cp_compute_enable(adev, enable);
3045 static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3049 r = gfx_v7_0_cp_gfx_load_microcode(adev);
3052 r = gfx_v7_0_cp_compute_load_microcode(adev);
3059 static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3073 static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3077 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3079 r = gfx_v7_0_cp_load_microcode(adev);
3083 r = gfx_v7_0_cp_gfx_resume(adev);
3086 r = gfx_v7_0_cp_compute_resume(adev);
3090 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3194 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3202 if (adev->flags & AMD_IS_APU) {
3203 if (adev->asic_type == CHIP_KAVERI) {
3204 adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3205 adev->gfx.rlc.reg_list_size =
3208 adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3209 adev->gfx.rlc.reg_list_size =
3213 adev->gfx.rlc.cs_data = ci_cs_data;
3214 adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3215 adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3217 src_ptr = adev->gfx.rlc.reg_list;
3218 dws = adev->gfx.rlc.reg_list_size;
3221 cs_data = adev->gfx.rlc.cs_data;
3225 r = amdgpu_gfx_rlc_init_sr(adev, dws);
3232 r = amdgpu_gfx_rlc_init_csb(adev);
3237 if (adev->gfx.rlc.cp_table_size) {
3238 r = amdgpu_gfx_rlc_init_cpt(adev);
3244 if (adev->gfx.rlc.funcs->update_spm_vmid)
3245 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
3250 static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3262 static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3267 mutex_lock(&adev->grbm_idx_mutex);
3268 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3269 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3270 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
3271 for (k = 0; k < adev->usec_timeout; k++) {
3278 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3279 mutex_unlock(&adev->grbm_idx_mutex);
3285 for (k = 0; k < adev->usec_timeout; k++) {
3292 static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3301 static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3313 for (i = 0; i < adev->usec_timeout; i++) {
3319 gfx_v7_0_wait_for_rlc_serdes(adev);
3325 static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
3330 static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
3339 for (i = 0; i < adev->usec_timeout; i++) {
3345 for (i = 0; i < adev->usec_timeout; i++) {
3352 static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
3363 * @adev: amdgpu_device pointer
3367 static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3371 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3373 gfx_v7_0_wait_for_rlc_serdes(adev);
3379 * @adev: amdgpu_device pointer
3383 static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3387 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3392 static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3407 * @adev: amdgpu_device pointer
3413 static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3420 if (!adev->gfx.rlc_fw)
3423 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3425 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3426 adev->gfx.rlc_feature_version = le32_to_cpu(
3429 adev->gfx.rlc.funcs->stop(adev);
3435 adev->gfx.rlc.funcs->reset(adev);
3437 gfx_v7_0_init_pg(adev);
3442 mutex_lock(&adev->grbm_idx_mutex);
3443 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3447 mutex_unlock(&adev->grbm_idx_mutex);
3453 (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3458 WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3461 gfx_v7_0_enable_lbpw(adev, false);
3463 if (adev->asic_type == CHIP_BONAIRE)
3466 adev->gfx.rlc.funcs->start(adev);
3471 static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
3475 amdgpu_gfx_off_ctrl(adev, false);
3484 amdgpu_gfx_off_ctrl(adev, true);
3487 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3493 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3494 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3496 tmp = gfx_v7_0_halt_rlc(adev);
3498 mutex_lock(&adev->grbm_idx_mutex);
3499 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3506 mutex_unlock(&adev->grbm_idx_mutex);
3508 gfx_v7_0_update_rlc(adev, tmp);
3515 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3526 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3530 static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3534 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3535 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3536 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3550 tmp = gfx_v7_0_halt_rlc(adev);
3552 mutex_lock(&adev->grbm_idx_mutex);
3553 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3559 mutex_unlock(&adev->grbm_idx_mutex);
3561 gfx_v7_0_update_rlc(adev, tmp);
3563 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3569 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3570 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3601 tmp = gfx_v7_0_halt_rlc(adev);
3603 mutex_lock(&adev->grbm_idx_mutex);
3604 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3609 mutex_unlock(&adev->grbm_idx_mutex);
3611 gfx_v7_0_update_rlc(adev, tmp);
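gfx_v7_0_enable_cgcg() and gfx_v7_0_enable_mgcg() above bracket their serdes writes with gfx_v7_0_halt_rlc()/gfx_v7_0_update_rlc(): stop the RLC while keeping its old control value, reprogram under broadcast GRBM index, then restore it. A sketch of the idiom; the RLC_SERDES master-mask register names are from the CIK headers, not this listing:

static void example_reprogram_serdes(struct amdgpu_device *adev)
{
	u32 tmp = gfx_v7_0_halt_rlc(adev);	/* stop RLC, capture old control value */

	mutex_lock(&adev->grbm_idx_mutex);
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
	WREG32(mmRLC_SERDES_WR_SE_MASTER_MASK, 0xffffffff);
	/* ...write the new RLC_SERDES_WR_CTRL value here... */
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v7_0_update_rlc(adev, tmp);		/* restore RLC control */
}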
3615 static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3618 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3621 gfx_v7_0_enable_mgcg(adev, true);
3622 gfx_v7_0_enable_cgcg(adev, true);
3624 gfx_v7_0_enable_cgcg(adev, false);
3625 gfx_v7_0_enable_mgcg(adev, false);
3627 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3630 static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3636 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3644 static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3650 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3658 static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3663 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3671 static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3676 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3684 static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
3686 if (adev->asic_type == CHIP_KAVERI)
3692 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3697 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
3722 static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
3736 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3746 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3751 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
3755 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
3759 tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
3763 static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
3769 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
3777 static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
3783 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
3794 static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
3799 if (adev->gfx.rlc.cs_data) {
3801 WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3802 WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3803 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
3809 if (adev->gfx.rlc.reg_list) {
3811 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3812 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
3820 WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
3821 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
3843 static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
3845 gfx_v7_0_enable_gfx_cgpg(adev, enable);
3846 gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
3847 gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
3850 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
3856 if (adev->gfx.rlc.cs_data == NULL)
3864 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3882 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
3889 if (adev->gfx.rlc.cs_data == NULL)
3901 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3917 switch (adev->asic_type) {
3948 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
3950 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3956 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
3957 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
3958 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
3959 gfx_v7_0_init_gfx_cgpg(adev);
3960 gfx_v7_0_enable_cp_pg(adev, true);
3961 gfx_v7_0_enable_gds_pg(adev, true);
3963 gfx_v7_0_init_ao_cu_mask(adev);
3964 gfx_v7_0_update_gfx_pg(adev, true);
3968 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
3970 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3976 gfx_v7_0_update_gfx_pg(adev, false);
3977 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
3978 gfx_v7_0_enable_cp_pg(adev, false);
3979 gfx_v7_0_enable_gds_pg(adev, false);
3987 * @adev: amdgpu_device pointer
3992 static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3996 mutex_lock(&adev->gfx.gpu_clock_mutex);
4000 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4045 struct amdgpu_device *adev = ring->adev;
4055 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
4065 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
4080 static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
4084 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
4085 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
4086 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
4087 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
4088 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
4089 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
4090 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
4091 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
4092 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
4093 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
4094 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
4095 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
4096 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
4097 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
4098 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
4099 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
4100 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
4101 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
4102 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
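gfx_v7_0_read_wave_data() above pulls every field through wave_read_ind(). A sketch of how that indirect read works on CIK, reconstructed from the SQ_IND_INDEX/SQ_IND_DATA interface; the shift and mask names are assumptions based on the GFX7 register headers:

static uint32_t example_wave_read_ind(struct amdgpu_device *adev,
				      uint32_t simd, uint32_t wave, uint32_t address)
{
	/* Select the SIMD, wave and per-wave register to read... */
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (address << SQ_IND_INDEX__INDEX__SHIFT) |
	       SQ_IND_INDEX__FORCE_READ_MASK);
	/* ...then latch the value out of the data port. */
	return RREG32(mmSQ_IND_DATA);
}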
4105 static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
4110 adev, simd, wave, 0,
4114 static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
4117 cik_srbm_select(adev, me, pipe, q, vm);
4145 struct amdgpu_device *adev = ip_block->adev;
4147 adev->gfx.xcc_mask = 1;
4148 adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4149 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4151 adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4152 adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4153 gfx_v7_0_set_ring_funcs(adev);
4154 gfx_v7_0_set_irq_funcs(adev);
4155 gfx_v7_0_set_gds_init(adev);
4162 struct amdgpu_device *adev = ip_block->adev;
4165 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4169 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4176 static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4183 switch (adev->asic_type) {
4185 adev->gfx.config.max_shader_engines = 2;
4186 adev->gfx.config.max_tile_pipes = 4;
4187 adev->gfx.config.max_cu_per_sh = 7;
4188 adev->gfx.config.max_sh_per_se = 1;
4189 adev->gfx.config.max_backends_per_se = 2;
4190 adev->gfx.config.max_texture_channel_caches = 4;
4191 adev->gfx.config.max_gprs = 256;
4192 adev->gfx.config.max_gs_threads = 32;
4193 adev->gfx.config.max_hw_contexts = 8;
4195 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4196 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4197 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4198 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4202 adev->gfx.config.max_shader_engines = 4;
4203 adev->gfx.config.max_tile_pipes = 16;
4204 adev->gfx.config.max_cu_per_sh = 11;
4205 adev->gfx.config.max_sh_per_se = 1;
4206 adev->gfx.config.max_backends_per_se = 4;
4207 adev->gfx.config.max_texture_channel_caches = 16;
4208 adev->gfx.config.max_gprs = 256;
4209 adev->gfx.config.max_gs_threads = 32;
4210 adev->gfx.config.max_hw_contexts = 8;
4212 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4213 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4214 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4215 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4219 adev->gfx.config.max_shader_engines = 1;
4220 adev->gfx.config.max_tile_pipes = 4;
4221 adev->gfx.config.max_cu_per_sh = 8;
4222 adev->gfx.config.max_backends_per_se = 2;
4223 adev->gfx.config.max_sh_per_se = 1;
4224 adev->gfx.config.max_texture_channel_caches = 4;
4225 adev->gfx.config.max_gprs = 256;
4226 adev->gfx.config.max_gs_threads = 16;
4227 adev->gfx.config.max_hw_contexts = 8;
4229 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4230 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4231 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4232 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4238 adev->gfx.config.max_shader_engines = 1;
4239 adev->gfx.config.max_tile_pipes = 2;
4240 adev->gfx.config.max_cu_per_sh = 2;
4241 adev->gfx.config.max_sh_per_se = 1;
4242 adev->gfx.config.max_backends_per_se = 1;
4243 adev->gfx.config.max_texture_channel_caches = 2;
4244 adev->gfx.config.max_gprs = 256;
4245 adev->gfx.config.max_gs_threads = 16;
4246 adev->gfx.config.max_hw_contexts = 8;
4248 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4249 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4250 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4251 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4256 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4257 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4259 adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
4261 adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
4264 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4265 adev->gfx.config.mem_max_burst_length_bytes = 256;
4266 if (adev->flags & AMD_IS_APU) {
4289 adev->gfx.config.mem_row_size_in_kb = 2;
4291 adev->gfx.config.mem_row_size_in_kb = 1;
4294 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4295 if (adev->gfx.config.mem_row_size_in_kb > 4)
4296 adev->gfx.config.mem_row_size_in_kb = 4;
4299 adev->gfx.config.shader_engine_tile_size = 32;
4300 adev->gfx.config.num_gpus = 1;
4301 adev->gfx.config.multi_gpu_tile_size = 64;
4305 switch (adev->gfx.config.mem_row_size_in_kb) {
4317 adev->gfx.config.gb_addr_config = gb_addr_config;
4320 static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
4325 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
4334 ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
4338 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
4342 r = amdgpu_ring_init(adev, ring, 1024,
4343 &adev->gfx.eop_irq, irq_type,
4355 struct amdgpu_device *adev = ip_block->adev;
4358 switch (adev->asic_type) {
4360 adev->gfx.mec.num_mec = 2;
4367 adev->gfx.mec.num_mec = 1;
4370 adev->gfx.mec.num_pipe_per_mec = 4;
4371 adev->gfx.mec.num_queue_per_pipe = 8;
4374 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
4379 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
4380 &adev->gfx.priv_reg_irq);
4385 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
4386 &adev->gfx.priv_inst_irq);
4390 r = gfx_v7_0_init_microcode(adev);
4396 r = adev->gfx.rlc.funcs->init(adev);
4403 r = gfx_v7_0_mec_init(adev);
4409 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4410 ring = &adev->gfx.gfx_ring[i];
4413 r = amdgpu_ring_init(adev, ring, 1024,
4414 &adev->gfx.eop_irq,
4423 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4424 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4425 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4426 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
4430 r = gfx_v7_0_compute_ring_init(adev,
4441 adev->gfx.ce_ram_size = 0x8000;
4443 gfx_v7_0_gpu_early_init(adev);
4450 struct amdgpu_device *adev = ip_block->adev;
4453 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4454 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4455 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4456 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4458 gfx_v7_0_cp_compute_fini(adev);
4459 amdgpu_gfx_rlc_fini(adev);
4460 gfx_v7_0_mec_fini(adev);
4461 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
4462 &adev->gfx.rlc.clear_state_gpu_addr,
4463 (void **)&adev->gfx.rlc.cs_ptr);
4464 if (adev->gfx.rlc.cp_table_size) {
4465 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
4466 &adev->gfx.rlc.cp_table_gpu_addr,
4467 (void **)&adev->gfx.rlc.cp_table_ptr);
4469 gfx_v7_0_free_microcode(adev);
4477 struct amdgpu_device *adev = ip_block->adev;
4479 gfx_v7_0_constants_init(adev);
4482 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
4484 r = adev->gfx.rlc.funcs->resume(adev);
4488 r = gfx_v7_0_cp_resume(adev);
4497 struct amdgpu_device *adev = ip_block->adev;
4499 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4500 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4501 gfx_v7_0_cp_enable(adev, false);
4502 adev->gfx.rlc.funcs->stop(adev);
4503 gfx_v7_0_fini_pg(adev);
4520 struct amdgpu_device *adev = ip_block->adev;
4532 struct amdgpu_device *adev = ip_block->adev;
4534 for (i = 0; i < adev->usec_timeout; i++) {
4549 struct amdgpu_device *adev = ip_block->adev;
4579 gfx_v7_0_fini_pg(adev);
4580 gfx_v7_0_update_cg(adev, false);
4583 adev->gfx.rlc.funcs->stop(adev);
4594 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4608 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4624 static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4645 static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4696 static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4721 static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4746 static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4753 gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
4756 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4759 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4762 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4765 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4768 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4771 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4774 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4777 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4785 static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4798 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4802 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4803 ring = &adev->gfx.compute_ring[i];
4812 static void gfx_v7_0_fault(struct amdgpu_device *adev,
4823 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
4827 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4828 ring = &adev->gfx.compute_ring[i];
4836 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
4841 gfx_v7_0_fault(adev, entry);
4845 static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
4851 gfx_v7_0_fault(adev, entry);
4859 struct amdgpu_device *adev = ip_block->adev;
4864 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
4867 gfx_v7_0_enable_mgcg(adev, true);
4868 gfx_v7_0_enable_cgcg(adev, true);
4870 gfx_v7_0_enable_cgcg(adev, false);
4871 gfx_v7_0_enable_mgcg(adev, false);
4873 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
4882 struct amdgpu_device *adev = ip_block->adev;
4887 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4893 gfx_v7_0_update_gfx_pg(adev, gate);
4894 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4895 gfx_v7_0_enable_cp_pg(adev, gate);
4896 gfx_v7_0_enable_gds_pg(adev, gate);
4959 struct amdgpu_device *adev = ring->adev;
4960 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
4966 if (amdgpu_sriov_vf(adev))
5083 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
5087 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5088 adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
5089 for (i = 0; i < adev->gfx.num_compute_rings; i++)
5090 adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
5108 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
5110 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5111 adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5113 adev->gfx.priv_reg_irq.num_types = 1;
5114 adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5116 adev->gfx.priv_inst_irq.num_types = 1;
5117 adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5120 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5123 adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
5124 adev->gds.gws_size = 64;
5125 adev->gds.oa_size = 16;
5126 adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
5130 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5134 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5138 if (adev->flags & AMD_IS_APU)
5141 ao_cu_num = adev->gfx.config.max_cu_per_sh;
5147 mutex_lock(&adev->grbm_idx_mutex);
5148 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5149 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5153 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
5156 adev, disable_masks[i * 2 + j]);
5157 bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5160 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5174 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
5175 mutex_unlock(&adev->grbm_idx_mutex);