1 /*
2 * Copyright 2020 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include "amdgpu.h"
24 #include "hdp_v4_0.h"
25 #include "amdgpu_ras.h"
26
27 #include "hdp/hdp_4_0_offset.h"
28 #include "hdp/hdp_4_0_sh_mask.h"
29 #include <uapi/linux/kfd_ioctl.h>
30
31 /* for Vega20 register name change */
32 #define mmHDP_MEM_POWER_CTRL 0x00d4
33 #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
34 #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
35 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
36 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
37 #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
38
hdp_v4_0_invalidate_hdp(struct amdgpu_device * adev,struct amdgpu_ring * ring)39 static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
40 struct amdgpu_ring *ring)
41 {
42 if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0) ||
43 amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2) ||
44 amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5))
45 return;
46
47 if (!ring || !ring->funcs->emit_wreg) {
48 WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
49 RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
50 } else {
51 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
52 HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
53 }
54 }
55
hdp_v4_0_query_ras_error_count(struct amdgpu_device * adev,void * ras_error_status)56 static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,
57 void *ras_error_status)
58 {
59 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
60
61 err_data->ue_count = 0;
62 err_data->ce_count = 0;
63
64 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
65 return;
66
67 /* HDP SRAM errors are uncorrectable ones (i.e. fatal errors) */
68 err_data->ue_count += RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
69 };
70
hdp_v4_0_reset_ras_error_count(struct amdgpu_device * adev)71 static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
72 {
73 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
74 return;
75
76 if (amdgpu_ip_version(adev, HDP_HWIP, 0) >= IP_VERSION(4, 4, 0))
77 WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0);
78 else
79 /*read back hdp ras counter to reset it to 0 */
80 RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
81 }
82
/* Enable or disable HDP memory light-sleep clock gating. Vega20-era
 * parts (and newer in this family) moved the LS bits from
 * HDP_MEM_POWER_LS into HDP_MEM_POWER_CTRL, hence the two paths.
 * The register is only written back when its value actually changes.
 */
static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
					 bool enable)
{
	uint32_t reg_save, reg_val;
	uint32_t hdp_ver = amdgpu_ip_version(adev, HDP_HWIP, 0);
	bool ls_on = enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS);

	if (hdp_ver == IP_VERSION(4, 0, 0) ||
	    hdp_ver == IP_VERSION(4, 0, 1) ||
	    hdp_ver == IP_VERSION(4, 1, 1) ||
	    hdp_ver == IP_VERSION(4, 1, 0)) {
		reg_save = reg_val = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (ls_on)
			reg_val |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			reg_val &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (reg_val != reg_save)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), reg_val);
	} else {
		/* all four IPH/RC control + LS enable bits toggle together */
		const uint32_t ls_mask =
			HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
			HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
			HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
			HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;

		reg_save = reg_val = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (ls_on)
			reg_val |= ls_mask;
		else
			reg_val &= ~ls_mask;

		if (reg_val != reg_save)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), reg_val);
	}
}
119
/* Report the currently active HDP clock-gating features in @flags. */
static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
					   u64 *flags)
{
	int power_ls;

	if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2) ||
	    amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5)) {
		/* MGCG is always on for these revisions */
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
		return;
	}

	/* AMD_CG_SUPPORT_HDP_LS: reflect the hardware LS enable bit */
	power_ls = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (power_ls & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
}
136
hdp_v4_0_init_registers(struct amdgpu_device * adev)137 static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
138 {
139 switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
140 case IP_VERSION(4, 2, 1):
141 WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
142 break;
143 default:
144 break;
145 }
146
147 /* Do not program registers if VF */
148 if (amdgpu_sriov_vf(adev))
149 return;
150
151 WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
152
153 if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0))
154 WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, READ_BUFFER_WATERMARK, 2);
155
156 WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
157 WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
158 }
159
/* RAS hardware ops for HDP 4.x: count query and counter reset only. */
struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = {
	.query_ras_error_count = hdp_v4_0_query_ras_error_count,
	.reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
};
164
/* RAS block descriptor exported to the amdgpu RAS framework. */
struct amdgpu_hdp_ras hdp_v4_0_ras = {
	.ras_block = {
		.hw_ops = &hdp_v4_0_ras_hw_ops,
	},
};
170
/* HDP callback table for the 4.0 generation; flush uses the generic
 * helper, everything else is implemented above.
 */
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
	.flush_hdp = amdgpu_hdp_generic_flush,
	.invalidate_hdp = hdp_v4_0_invalidate_hdp,
	.update_clock_gating = hdp_v4_0_update_clock_gating,
	.get_clock_gating_state = hdp_v4_0_get_clockgating_state,
	.init_registers = hdp_v4_0_init_registers,
};
178