xref: /linux/drivers/gpu/drm/amd/amdgpu/soc15_common.h (revision b7df4cc3a088a8ce6973c96731bc792dbf54ce28)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #ifndef __SOC15_COMMON_H__
25 #define __SOC15_COMMON_H__
26 
/* GET_INST returns the physical instance corresponding to a logical instance */
/*
 * Falls back to the identity mapping when the ASIC registers no
 * logical->physical translator.  Both helpers expect an 'adev'
 * (struct amdgpu_device *) in the caller's scope.
 */
#define GET_INST(ip, inst) \
	(adev->ip_map.logical_to_dev_inst ? \
	adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst)
/* GET_MASK translates a logical instance mask to the physical mask */
#define GET_MASK(ip, mask) \
	(adev->ip_map.logical_to_dev_mask ? \
	adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask)
34 
/* Register Access Macros */
/*
 * SOC15_REG_OFFSET: absolute dword offset of a register — the per-IP,
 * per-instance segment base (looked up via the reg##_BASE_IDX token paste)
 * plus the register's relative offset.
 */
#define SOC15_REG_OFFSET(ip, inst, reg)	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
/* Same as SOC15_REG_OFFSET with an extra caller-supplied dword offset. */
#define SOC15_REG_OFFSET1(ip, inst, reg, offset) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)+(offset))
39 
/*
 * Core access helpers: on an SR-IOV VF with RLCG register access support,
 * route the access through the host-mediated amdgpu_sriov_wreg() /
 * amdgpu_sriov_rreg() path; otherwise fall back to plain MMIO
 * WREG32()/RREG32().  'flag' carries AMDGPU_REGS_* access hints.
 */
#define __WREG32_SOC15_RLC__(reg, value, flag, hwip, inst) \
	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
	 amdgpu_sriov_wreg(adev, reg, value, flag, hwip, inst) : \
	 WREG32(reg, value))

#define __RREG32_SOC15_RLC__(reg, flag, hwip, inst) \
	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
	 amdgpu_sriov_rreg(adev, reg, flag, hwip, inst) : \
	 RREG32(reg))
49 
/*
 * WREG32_FIELD15: read-modify-write a single field of an mm-prefixed
 * SOC15 register — read, clear the field's mask, OR in (val) shifted into
 * position, write back.  Not atomic; callers serialize as needed.
 */
#define WREG32_FIELD15(ip, idx, reg, field, val)	\
	 __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg,	\
				(__RREG32_SOC15_RLC__( \
					adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
					0, ip##_HWIP, idx) & \
				~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
			      0, ip##_HWIP, idx)

/*
 * WREG32_FIELD15_PREREG: same read-modify-write, but for reg-prefixed
 * (newer register header) names.
 */
#define WREG32_FIELD15_PREREG(ip, idx, reg_name, field, val)        \
	__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name,   \
			(__RREG32_SOC15_RLC__( \
					adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \
					0, ip##_HWIP, idx) & \
					~REG_FIELD_MASK(reg_name, field)) | (val) << REG_FIELD_SHIFT(reg_name, field), \
			0, ip##_HWIP, idx)
65 
/* Read a SOC15 register of logical instance 'inst' at its computed offset. */
#define RREG32_SOC15(ip, inst, reg) \
	__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
			 0, ip##_HWIP, inst)

/* Read an already-computed register offset 'reg' (instance 0). */
#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP, 0)

/* Same, with the AMDGPU_REGS_NO_KIQ hint (bypass the KIQ access path). */
#define RREG32_SOC15_IP_NO_KIQ(ip, reg, inst) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)

/* RREG32_SOC15 with the KIQ access path bypassed. */
#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
	__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
			 AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)

/* RREG32_SOC15 with an extra dword offset added to the register. */
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
	 __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)) + \
			 (offset), 0, ip##_HWIP, inst)
81 
/* Write 'value' to a SOC15 register of logical instance 'inst'. */
#define WREG32_SOC15(ip, inst, reg, value) \
	 __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
			  value, 0, ip##_HWIP, inst)

/* Write to an already-computed register offset 'reg' (instance 0). */
#define WREG32_SOC15_IP(ip, reg, value) \
	 __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP, 0)

/* Same, with the AMDGPU_REGS_NO_KIQ hint (bypass the KIQ access path). */
#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value, inst) \
	 __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)

/* WREG32_SOC15 with the KIQ access path bypassed. */
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
	__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
			     value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)
95 
/*
 * Write 'value' at a SOC15 register offset plus an extra dword 'offset'.
 * 'offset' is parenthesized (matching RREG32_SOC15_OFFSET) so that an
 * expression argument such as "i << 1" is added as a whole instead of
 * being re-associated by operator precedence.
 */
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
	 __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + (offset), \
			  value, 0, ip##_HWIP, inst)
99 
/*
 * Poll a SOC15 register until it matches 'expected_value' under 'mask';
 * timeout and poll-interval semantics live in amdgpu_device_wait_on_rreg().
 * #reg is stringified purely for the diagnostic message on failure.
 */
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask)      \
	amdgpu_device_wait_on_rreg(adev, inst,                       \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)), \
	#reg, expected_value, mask)

/* As above, polling at register + extra dword offset. */
#define SOC15_WAIT_ON_RREG_OFFSET(ip, inst, reg, offset, expected_value, mask)  \
	amdgpu_device_wait_on_rreg(adev, inst,                                  \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg) + (offset)), \
	#reg, expected_value, mask)
109 
/* Write a GC register through the RLC path (absolute offset, instance 0). */
#define WREG32_RLC(reg, value) \
	__WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP, 0)

/*
 * WREG32_RLC_EX: under SR-IOV full access, program a GC register via the
 * RLC scratch-register handshake instead of direct MMIO:
 *   SCRATCH_REG0 <- value, SCRATCH_REG1 <- reg | BIT(31) (request flag),
 *   then ring RLC_SPARE_INT and poll up to 50000 x 10us for the RLC to
 *   clear bit 31 of SCRATCH_REG1 as the acknowledgement.
 * Outside full-access mode it is a plain WREG32().  'prefix' selects the
 * register-name prefix used for the scratch/spare-int lookups.
 */
#define WREG32_RLC_EX(prefix, reg, value, inst) \
	do {							\
		if (amdgpu_sriov_fullaccess(adev)) {    \
			uint32_t i = 0;	\
			uint32_t retries = 50000;	\
			uint32_t r0 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0;	\
			uint32_t r1 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1;	\
			uint32_t spare_int = adev->reg_offset[GC_HWIP][inst][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT;	\
			WREG32(r0, value);	\
			WREG32(r1, (reg | 0x80000000));	\
			WREG32(spare_int, 0x1);	\
			for (i = 0; i < retries; i++) {	\
				u32 tmp = RREG32(r1);	\
				if (!(tmp & 0x80000000))	\
					break;	\
				udelay(10);	\
			}	\
			if (i >= retries)	\
				pr_err("timeout: rlcg program reg:0x%05x failed !\n", reg);	\
		} else {	\
			WREG32(reg, value); \
		}	\
	} while (0)
136 
/* shadow the registers in the callback function */
/*
 * NOTE(review): the hwip argument is hard-wired to GC_HWIP even though
 * 'ip' selects the offset table — presumably intentional since RLC
 * shadowing is GC-only; confirm before using with a non-GC 'ip'.
 */
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
	__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP, inst)
140 
/* for GC only */
/* Read via the RLC path at an absolute offset (instance 0). */
#define RREG32_RLC(reg) \
	__RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP, 0)

/* RLC write with the KIQ access path bypassed; caller supplies the hwip. */
#define WREG32_RLC_NO_KIQ(reg, value, hwip) \
	__WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0)

/* RLC read with the KIQ access path bypassed; caller supplies the hwip. */
#define RREG32_RLC_NO_KIQ(reg, hwip) \
	__RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0)
150 
/*
 * WREG32_SOC15_RLC_SHADOW_EX: write a register and, under SR-IOV full
 * access, additionally mirror writes to GRBM_GFX_CNTL / GRBM_GFX_INDEX
 * into SCRATCH_REG2 / SCRATCH_REG3 so the RLC can observe the current
 * GRBM selection; the target register itself is written in both branches.
 * NOTE(review): r2/r3 are computed from prefix##SCRATCH_REG1_BASE_IDX
 * combined with the SCRATCH_REG2/SCRATCH_REG3 offsets — presumably all
 * scratch registers share one BASE_IDX; confirm against the reg headers.
 */
#define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
	do {							\
		uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
		if (amdgpu_sriov_fullaccess(adev)) {    \
			uint32_t r2 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG2;	\
			uint32_t r3 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG3;	\
			uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][inst][prefix##GRBM_GFX_CNTL_BASE_IDX] + prefix##GRBM_GFX_CNTL;   \
			uint32_t grbm_idx = adev->reg_offset[GC_HWIP][inst][prefix##GRBM_GFX_INDEX_BASE_IDX] + prefix##GRBM_GFX_INDEX;   \
			if (target_reg == grbm_cntl) \
				WREG32(r2, value);	\
			else if (target_reg == grbm_idx) \
				WREG32(r3, value);	\
			WREG32(target_reg, value);	\
		} else {	\
			WREG32(target_reg, value); \
		}	\
	} while (0)
168 
/* Read a SOC15 register through the RLC path. */
#define RREG32_SOC15_RLC(ip, inst, reg) \
	__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP, inst)

/* Write a SOC15 register through the RLC path. */
#define WREG32_SOC15_RLC(ip, inst, reg, value) \
	do {							\
		uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
		__WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP, inst); \
	} while (0)
177 
/*
 * Write through the RLC scratch-register handshake (see WREG32_RLC_EX).
 * NOTE(review): the offset lookup is hard-wired to GC_HWIP and ignores
 * the 'ip' parameter — presumably intentional since the handshake is
 * GC-only; confirm before relying on 'ip' here.
 */
#define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
	do {							\
			uint32_t target_reg = adev->reg_offset[GC_HWIP][inst][reg##_BASE_IDX] + reg;\
			WREG32_RLC_EX(prefix, target_reg, value, inst); \
	} while (0)
183 
/*
 * Read-modify-write of one register field through the RLC path
 * (mm-prefixed register names).  Not atomic; callers serialize as needed.
 */
#define WREG32_FIELD15_RLC(ip, idx, reg, field, val)   \
	__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
			     (__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
						   AMDGPU_REGS_RLC, ip##_HWIP, idx) & \
			      ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
			     AMDGPU_REGS_RLC, ip##_HWIP, idx)
190 
/*
 * RLC-routed variants of the OFFSET accessors.  'offset' is parenthesized
 * so that an expression argument such as "i << 1" is added as a whole
 * instead of being re-associated by operator precedence (matches
 * RREG32_SOC15_OFFSET above).
 */
#define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
	__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + (offset), value, AMDGPU_REGS_RLC, ip##_HWIP, inst)

#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
	__RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + (offset), AMDGPU_REGS_RLC, ip##_HWIP, inst)
196 
/* inst equals to ext for some IPs */
/*
 * Extended-addressing accessors: the dword offset is scaled by 4 to a
 * byte address and combined with the ASIC-specific extended SMN aperture
 * encoding for 'ext' (presumably an AID/die index — confirm per ASIC),
 * then accessed through the PCIE_EXT interface.
 */
#define RREG32_SOC15_EXT(ip, inst, reg, ext) \
	RREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * 4 \
			+ adev->asic_funcs->encode_ext_smn_addressing(ext)) \

#define WREG32_SOC15_EXT(ip, inst, reg, ext, value) \
	WREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * 4 \
			+ adev->asic_funcs->encode_ext_smn_addressing(ext), \
			value) \
206 
/*
 * 64-bit MCA bank accessors: address = extended SMN aperture for 'ext'
 * + bank base + 8 bytes per 64-bit register index.
 * 'mca_base' and 'idx' are parenthesized so expression arguments
 * (e.g. "i + 1") scale and add as a whole; previously
 * RREG64_MCA(e, b, i + 1) expanded to "b + (i + 1 * 8)" — wrong address.
 */
#define RREG64_MCA(ext, mca_base, idx) \
	RREG64_PCIE_EXT(adev->asic_funcs->encode_ext_smn_addressing(ext) + (mca_base) + ((idx) * 8))

#define WREG64_MCA(ext, mca_base, idx, val) \
	WREG64_PCIE_EXT(adev->asic_funcs->encode_ext_smn_addressing(ext) + (mca_base) + ((idx) * 8), val)
212 
213 #endif
214