xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_reg_access.c (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612)
157052d29SLijo Lazar // SPDX-License-Identifier: MIT
257052d29SLijo Lazar /*
357052d29SLijo Lazar  * Copyright 2025 Advanced Micro Devices, Inc.
457052d29SLijo Lazar  *
557052d29SLijo Lazar  * Permission is hereby granted, free of charge, to any person obtaining a
657052d29SLijo Lazar  * copy of this software and associated documentation files (the "Software"),
757052d29SLijo Lazar  * to deal in the Software without restriction, including without limitation
857052d29SLijo Lazar  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
957052d29SLijo Lazar  * and/or sell copies of the Software, and to permit persons to whom the
1057052d29SLijo Lazar  * Software is furnished to do so, subject to the following conditions:
1157052d29SLijo Lazar  *
1257052d29SLijo Lazar  * The above copyright notice and this permission notice shall be included in
1357052d29SLijo Lazar  * all copies or substantial portions of the Software.
1457052d29SLijo Lazar  *
1557052d29SLijo Lazar  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1657052d29SLijo Lazar  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1757052d29SLijo Lazar  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1857052d29SLijo Lazar  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
1957052d29SLijo Lazar  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2057052d29SLijo Lazar  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
2157052d29SLijo Lazar  * OTHER DEALINGS IN THE SOFTWARE.
2257052d29SLijo Lazar  */
2357052d29SLijo Lazar 
2457052d29SLijo Lazar #include <linux/delay.h>
2557052d29SLijo Lazar 
2657052d29SLijo Lazar #include "amdgpu.h"
2757052d29SLijo Lazar #include "amdgpu_reset.h"
2857052d29SLijo Lazar #include "amdgpu_trace.h"
2957052d29SLijo Lazar #include "amdgpu_virt.h"
3057052d29SLijo Lazar #include "amdgpu_reg_access.h"
3157052d29SLijo Lazar 
3257052d29SLijo Lazar #define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
3357052d29SLijo Lazar #define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
3457052d29SLijo Lazar #define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
3557052d29SLijo Lazar 
36f4eb08f8SLijo Lazar void amdgpu_reg_access_init(struct amdgpu_device *adev)
37f4eb08f8SLijo Lazar {
38f4eb08f8SLijo Lazar 	spin_lock_init(&adev->reg.smc.lock);
39f4eb08f8SLijo Lazar 	adev->reg.smc.rreg = NULL;
40f4eb08f8SLijo Lazar 	adev->reg.smc.wreg = NULL;
41366201e7SLijo Lazar 
42366201e7SLijo Lazar 	spin_lock_init(&adev->reg.uvd_ctx.lock);
43366201e7SLijo Lazar 	adev->reg.uvd_ctx.rreg = NULL;
44366201e7SLijo Lazar 	adev->reg.uvd_ctx.wreg = NULL;
454780a26aSLijo Lazar 
464780a26aSLijo Lazar 	spin_lock_init(&adev->reg.didt.lock);
474780a26aSLijo Lazar 	adev->reg.didt.rreg = NULL;
484780a26aSLijo Lazar 	adev->reg.didt.wreg = NULL;
49d2de787fSLijo Lazar 
50d2de787fSLijo Lazar 	spin_lock_init(&adev->reg.gc_cac.lock);
51d2de787fSLijo Lazar 	adev->reg.gc_cac.rreg = NULL;
52d2de787fSLijo Lazar 	adev->reg.gc_cac.wreg = NULL;
53b1a516a5SLijo Lazar 
54b1a516a5SLijo Lazar 	spin_lock_init(&adev->reg.se_cac.lock);
55b1a516a5SLijo Lazar 	adev->reg.se_cac.rreg = NULL;
56b1a516a5SLijo Lazar 	adev->reg.se_cac.wreg = NULL;
5772cc2e30SLijo Lazar 
5872cc2e30SLijo Lazar 	spin_lock_init(&adev->reg.audio_endpt.lock);
5972cc2e30SLijo Lazar 	adev->reg.audio_endpt.rreg = NULL;
6072cc2e30SLijo Lazar 	adev->reg.audio_endpt.wreg = NULL;
615d82f451SLijo Lazar 
62b2d55124SLijo Lazar 	spin_lock_init(&adev->reg.pcie.lock);
63e84d7e71SLijo Lazar 	adev->reg.pcie.rreg = NULL;
64e84d7e71SLijo Lazar 	adev->reg.pcie.wreg = NULL;
655312d68aSLijo Lazar 	adev->reg.pcie.rreg_ext = NULL;
665312d68aSLijo Lazar 	adev->reg.pcie.wreg_ext = NULL;
6774b9c49eSLijo Lazar 	adev->reg.pcie.rreg64 = NULL;
6874b9c49eSLijo Lazar 	adev->reg.pcie.wreg64 = NULL;
694a6ab037SLijo Lazar 	adev->reg.pcie.rreg64_ext = NULL;
704a6ab037SLijo Lazar 	adev->reg.pcie.wreg64_ext = NULL;
715d82f451SLijo Lazar 	adev->reg.pcie.port_rreg = NULL;
725d82f451SLijo Lazar 	adev->reg.pcie.port_wreg = NULL;
73f4eb08f8SLijo Lazar }
74f4eb08f8SLijo Lazar 
75f4eb08f8SLijo Lazar uint32_t amdgpu_reg_smc_rd32(struct amdgpu_device *adev, uint32_t reg)
76f4eb08f8SLijo Lazar {
77f4eb08f8SLijo Lazar 	if (!adev->reg.smc.rreg) {
78f4eb08f8SLijo Lazar 		dev_err_once(adev->dev, "SMC register read not supported\n");
79f4eb08f8SLijo Lazar 		return 0;
80f4eb08f8SLijo Lazar 	}
81f4eb08f8SLijo Lazar 	return adev->reg.smc.rreg(adev, reg);
82f4eb08f8SLijo Lazar }
83f4eb08f8SLijo Lazar 
84f4eb08f8SLijo Lazar void amdgpu_reg_smc_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
85f4eb08f8SLijo Lazar {
86f4eb08f8SLijo Lazar 	if (!adev->reg.smc.wreg) {
87f4eb08f8SLijo Lazar 		dev_err_once(adev->dev, "SMC register write not supported\n");
88f4eb08f8SLijo Lazar 		return;
89f4eb08f8SLijo Lazar 	}
90f4eb08f8SLijo Lazar 	adev->reg.smc.wreg(adev, reg, v);
91f4eb08f8SLijo Lazar }
92f4eb08f8SLijo Lazar 
93366201e7SLijo Lazar uint32_t amdgpu_reg_uvd_ctx_rd32(struct amdgpu_device *adev, uint32_t reg)
94366201e7SLijo Lazar {
95366201e7SLijo Lazar 	if (!adev->reg.uvd_ctx.rreg) {
96366201e7SLijo Lazar 		dev_err_once(adev->dev,
97366201e7SLijo Lazar 			     "UVD_CTX register read not supported\n");
98366201e7SLijo Lazar 		return 0;
99366201e7SLijo Lazar 	}
100366201e7SLijo Lazar 	return adev->reg.uvd_ctx.rreg(adev, reg);
101366201e7SLijo Lazar }
102366201e7SLijo Lazar 
103366201e7SLijo Lazar void amdgpu_reg_uvd_ctx_wr32(struct amdgpu_device *adev, uint32_t reg,
104366201e7SLijo Lazar 			     uint32_t v)
105366201e7SLijo Lazar {
106366201e7SLijo Lazar 	if (!adev->reg.uvd_ctx.wreg) {
107366201e7SLijo Lazar 		dev_err_once(adev->dev,
108366201e7SLijo Lazar 			     "UVD_CTX register write not supported\n");
109366201e7SLijo Lazar 		return;
110366201e7SLijo Lazar 	}
111366201e7SLijo Lazar 	adev->reg.uvd_ctx.wreg(adev, reg, v);
112366201e7SLijo Lazar }
113366201e7SLijo Lazar 
1144780a26aSLijo Lazar uint32_t amdgpu_reg_didt_rd32(struct amdgpu_device *adev, uint32_t reg)
1154780a26aSLijo Lazar {
1164780a26aSLijo Lazar 	if (!adev->reg.didt.rreg) {
1174780a26aSLijo Lazar 		dev_err_once(adev->dev, "DIDT register read not supported\n");
1184780a26aSLijo Lazar 		return 0;
1194780a26aSLijo Lazar 	}
1204780a26aSLijo Lazar 	return adev->reg.didt.rreg(adev, reg);
1214780a26aSLijo Lazar }
1224780a26aSLijo Lazar 
1234780a26aSLijo Lazar void amdgpu_reg_didt_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1244780a26aSLijo Lazar {
1254780a26aSLijo Lazar 	if (!adev->reg.didt.wreg) {
1264780a26aSLijo Lazar 		dev_err_once(adev->dev, "DIDT register write not supported\n");
1274780a26aSLijo Lazar 		return;
1284780a26aSLijo Lazar 	}
1294780a26aSLijo Lazar 	adev->reg.didt.wreg(adev, reg, v);
1304780a26aSLijo Lazar }
1314780a26aSLijo Lazar 
132d2de787fSLijo Lazar uint32_t amdgpu_reg_gc_cac_rd32(struct amdgpu_device *adev, uint32_t reg)
133d2de787fSLijo Lazar {
134d2de787fSLijo Lazar 	if (!adev->reg.gc_cac.rreg) {
135d2de787fSLijo Lazar 		dev_err_once(adev->dev, "GC_CAC register read not supported\n");
136d2de787fSLijo Lazar 		return 0;
137d2de787fSLijo Lazar 	}
138d2de787fSLijo Lazar 	return adev->reg.gc_cac.rreg(adev, reg);
139d2de787fSLijo Lazar }
140d2de787fSLijo Lazar 
141d2de787fSLijo Lazar void amdgpu_reg_gc_cac_wr32(struct amdgpu_device *adev, uint32_t reg,
142d2de787fSLijo Lazar 			    uint32_t v)
143d2de787fSLijo Lazar {
144d2de787fSLijo Lazar 	if (!adev->reg.gc_cac.wreg) {
145d2de787fSLijo Lazar 		dev_err_once(adev->dev,
146d2de787fSLijo Lazar 			     "GC_CAC register write not supported\n");
147d2de787fSLijo Lazar 		return;
148d2de787fSLijo Lazar 	}
149d2de787fSLijo Lazar 	adev->reg.gc_cac.wreg(adev, reg, v);
150d2de787fSLijo Lazar }
151d2de787fSLijo Lazar 
152b1a516a5SLijo Lazar uint32_t amdgpu_reg_se_cac_rd32(struct amdgpu_device *adev, uint32_t reg)
153b1a516a5SLijo Lazar {
154b1a516a5SLijo Lazar 	if (!adev->reg.se_cac.rreg) {
155b1a516a5SLijo Lazar 		dev_err_once(adev->dev, "SE_CAC register read not supported\n");
156b1a516a5SLijo Lazar 		return 0;
157b1a516a5SLijo Lazar 	}
158b1a516a5SLijo Lazar 	return adev->reg.se_cac.rreg(adev, reg);
159b1a516a5SLijo Lazar }
160b1a516a5SLijo Lazar 
161b1a516a5SLijo Lazar void amdgpu_reg_se_cac_wr32(struct amdgpu_device *adev, uint32_t reg,
162b1a516a5SLijo Lazar 			    uint32_t v)
163b1a516a5SLijo Lazar {
164b1a516a5SLijo Lazar 	if (!adev->reg.se_cac.wreg) {
165b1a516a5SLijo Lazar 		dev_err_once(adev->dev,
166b1a516a5SLijo Lazar 			     "SE_CAC register write not supported\n");
167b1a516a5SLijo Lazar 		return;
168b1a516a5SLijo Lazar 	}
169b1a516a5SLijo Lazar 	adev->reg.se_cac.wreg(adev, reg, v);
170b1a516a5SLijo Lazar }
171b1a516a5SLijo Lazar 
17272cc2e30SLijo Lazar uint32_t amdgpu_reg_audio_endpt_rd32(struct amdgpu_device *adev, uint32_t block,
17372cc2e30SLijo Lazar 				     uint32_t reg)
17472cc2e30SLijo Lazar {
17572cc2e30SLijo Lazar 	if (!adev->reg.audio_endpt.rreg) {
17672cc2e30SLijo Lazar 		dev_err_once(adev->dev,
17772cc2e30SLijo Lazar 			     "AUDIO_ENDPT register read not supported\n");
17872cc2e30SLijo Lazar 		return 0;
17972cc2e30SLijo Lazar 	}
18072cc2e30SLijo Lazar 	return adev->reg.audio_endpt.rreg(adev, block, reg);
18172cc2e30SLijo Lazar }
18272cc2e30SLijo Lazar 
18372cc2e30SLijo Lazar void amdgpu_reg_audio_endpt_wr32(struct amdgpu_device *adev, uint32_t block,
18472cc2e30SLijo Lazar 				 uint32_t reg, uint32_t v)
18572cc2e30SLijo Lazar {
18672cc2e30SLijo Lazar 	if (!adev->reg.audio_endpt.wreg) {
18772cc2e30SLijo Lazar 		dev_err_once(adev->dev,
18872cc2e30SLijo Lazar 			     "AUDIO_ENDPT register write not supported\n");
18972cc2e30SLijo Lazar 		return;
19072cc2e30SLijo Lazar 	}
19172cc2e30SLijo Lazar 	adev->reg.audio_endpt.wreg(adev, block, reg, v);
19272cc2e30SLijo Lazar }
19372cc2e30SLijo Lazar 
194e84d7e71SLijo Lazar uint32_t amdgpu_reg_pcie_rd32(struct amdgpu_device *adev, uint32_t reg)
195e84d7e71SLijo Lazar {
196e84d7e71SLijo Lazar 	if (!adev->reg.pcie.rreg) {
197e84d7e71SLijo Lazar 		dev_err_once(adev->dev, "PCIE register read not supported\n");
198e84d7e71SLijo Lazar 		return 0;
199e84d7e71SLijo Lazar 	}
200e84d7e71SLijo Lazar 	return adev->reg.pcie.rreg(adev, reg);
201e84d7e71SLijo Lazar }
202e84d7e71SLijo Lazar 
203e84d7e71SLijo Lazar void amdgpu_reg_pcie_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
204e84d7e71SLijo Lazar {
205e84d7e71SLijo Lazar 	if (!adev->reg.pcie.wreg) {
206e84d7e71SLijo Lazar 		dev_err_once(adev->dev, "PCIE register write not supported\n");
207e84d7e71SLijo Lazar 		return;
208e84d7e71SLijo Lazar 	}
209e84d7e71SLijo Lazar 	adev->reg.pcie.wreg(adev, reg, v);
210e84d7e71SLijo Lazar }
211e84d7e71SLijo Lazar 
2125312d68aSLijo Lazar uint32_t amdgpu_reg_pcie_ext_rd32(struct amdgpu_device *adev, uint64_t reg)
2135312d68aSLijo Lazar {
2145312d68aSLijo Lazar 	if (!adev->reg.pcie.rreg_ext) {
2155312d68aSLijo Lazar 		dev_err_once(adev->dev, "PCIE EXT register read not supported\n");
2165312d68aSLijo Lazar 		return 0;
2175312d68aSLijo Lazar 	}
2185312d68aSLijo Lazar 	return adev->reg.pcie.rreg_ext(adev, reg);
2195312d68aSLijo Lazar }
2205312d68aSLijo Lazar 
2215312d68aSLijo Lazar void amdgpu_reg_pcie_ext_wr32(struct amdgpu_device *adev, uint64_t reg,
2225312d68aSLijo Lazar 			      uint32_t v)
2235312d68aSLijo Lazar {
2245312d68aSLijo Lazar 	if (!adev->reg.pcie.wreg_ext) {
2255312d68aSLijo Lazar 		dev_err_once(adev->dev, "PCIE EXT register write not supported\n");
2265312d68aSLijo Lazar 		return;
2275312d68aSLijo Lazar 	}
2285312d68aSLijo Lazar 	adev->reg.pcie.wreg_ext(adev, reg, v);
2295312d68aSLijo Lazar }
2305312d68aSLijo Lazar 
23174b9c49eSLijo Lazar uint64_t amdgpu_reg_pcie_rd64(struct amdgpu_device *adev, uint32_t reg)
23274b9c49eSLijo Lazar {
23374b9c49eSLijo Lazar 	if (!adev->reg.pcie.rreg64) {
23474b9c49eSLijo Lazar 		dev_err_once(adev->dev, "PCIE 64-bit register read not supported\n");
23574b9c49eSLijo Lazar 		return 0;
23674b9c49eSLijo Lazar 	}
23774b9c49eSLijo Lazar 	return adev->reg.pcie.rreg64(adev, reg);
23874b9c49eSLijo Lazar }
23974b9c49eSLijo Lazar 
24074b9c49eSLijo Lazar void amdgpu_reg_pcie_wr64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
24174b9c49eSLijo Lazar {
24274b9c49eSLijo Lazar 	if (!adev->reg.pcie.wreg64) {
24374b9c49eSLijo Lazar 		dev_err_once(adev->dev, "PCIE 64-bit register write not supported\n");
24474b9c49eSLijo Lazar 		return;
24574b9c49eSLijo Lazar 	}
24674b9c49eSLijo Lazar 	adev->reg.pcie.wreg64(adev, reg, v);
24774b9c49eSLijo Lazar }
24874b9c49eSLijo Lazar 
2494a6ab037SLijo Lazar uint64_t amdgpu_reg_pcie_ext_rd64(struct amdgpu_device *adev, uint64_t reg)
2504a6ab037SLijo Lazar {
2514a6ab037SLijo Lazar 	if (!adev->reg.pcie.rreg64_ext) {
2524a6ab037SLijo Lazar 		dev_err_once(adev->dev, "PCIE EXT 64-bit register read not supported\n");
2534a6ab037SLijo Lazar 		return 0;
2544a6ab037SLijo Lazar 	}
2554a6ab037SLijo Lazar 	return adev->reg.pcie.rreg64_ext(adev, reg);
2564a6ab037SLijo Lazar }
2574a6ab037SLijo Lazar 
2584a6ab037SLijo Lazar void amdgpu_reg_pcie_ext_wr64(struct amdgpu_device *adev, uint64_t reg,
2594a6ab037SLijo Lazar 			      uint64_t v)
2604a6ab037SLijo Lazar {
2614a6ab037SLijo Lazar 	if (!adev->reg.pcie.wreg64_ext) {
2624a6ab037SLijo Lazar 		dev_err_once(adev->dev, "PCIE EXT 64-bit register write not supported\n");
2634a6ab037SLijo Lazar 		return;
2644a6ab037SLijo Lazar 	}
2654a6ab037SLijo Lazar 	adev->reg.pcie.wreg64_ext(adev, reg, v);
2664a6ab037SLijo Lazar }
2674a6ab037SLijo Lazar 
2685d82f451SLijo Lazar uint32_t amdgpu_reg_pciep_rd32(struct amdgpu_device *adev, uint32_t reg)
2695d82f451SLijo Lazar {
2705d82f451SLijo Lazar 	if (!adev->reg.pcie.port_rreg) {
2715d82f451SLijo Lazar 		dev_err_once(adev->dev, "PCIEP register read not supported\n");
2725d82f451SLijo Lazar 		return 0;
2735d82f451SLijo Lazar 	}
2745d82f451SLijo Lazar 	return adev->reg.pcie.port_rreg(adev, reg);
2755d82f451SLijo Lazar }
2765d82f451SLijo Lazar 
2775d82f451SLijo Lazar void amdgpu_reg_pciep_wr32(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
2785d82f451SLijo Lazar {
2795d82f451SLijo Lazar 	if (!adev->reg.pcie.port_wreg) {
2805d82f451SLijo Lazar 		dev_err_once(adev->dev, "PCIEP register write not supported\n");
2815d82f451SLijo Lazar 		return;
2825d82f451SLijo Lazar 	}
2835d82f451SLijo Lazar 	adev->reg.pcie.port_wreg(adev, reg, v);
2845d82f451SLijo Lazar }
2855d82f451SLijo Lazar 
286*36a02456SLijo Lazar static int amdgpu_reg_get_smn_base_version(struct amdgpu_device *adev)
287*36a02456SLijo Lazar {
288*36a02456SLijo Lazar 	struct pci_dev *pdev = adev->pdev;
289*36a02456SLijo Lazar 	int id;
290*36a02456SLijo Lazar 
291*36a02456SLijo Lazar 	if (amdgpu_sriov_vf(adev))
292*36a02456SLijo Lazar 		return -EOPNOTSUPP;
293*36a02456SLijo Lazar 
294*36a02456SLijo Lazar 	id = (pdev->device >> 4) & 0xFFFF;
295*36a02456SLijo Lazar 	if (id == 0x74A || id == 0x74B || id == 0x75A || id == 0x75B)
296*36a02456SLijo Lazar 		return 1;
297*36a02456SLijo Lazar 
298*36a02456SLijo Lazar 	return -EOPNOTSUPP;
299*36a02456SLijo Lazar }
300*36a02456SLijo Lazar 
301467ebfe6SLijo Lazar uint64_t amdgpu_reg_get_smn_base64(struct amdgpu_device *adev,
302467ebfe6SLijo Lazar 				   enum amd_hw_ip_block_type block,
303467ebfe6SLijo Lazar 				   int die_inst)
304467ebfe6SLijo Lazar {
305467ebfe6SLijo Lazar 	if (!adev->reg.smn.get_smn_base) {
306*36a02456SLijo Lazar 		int version = amdgpu_reg_get_smn_base_version(adev);
307*36a02456SLijo Lazar 		switch (version) {
308*36a02456SLijo Lazar 		case 1:
309*36a02456SLijo Lazar 			return amdgpu_reg_smn_v1_0_get_base(adev, block,
310*36a02456SLijo Lazar 							    die_inst);
311*36a02456SLijo Lazar 		default:
312*36a02456SLijo Lazar 			dev_err_once(
313*36a02456SLijo Lazar 				adev->dev,
314*36a02456SLijo Lazar 				"SMN base address query not supported for this device\n");
315*36a02456SLijo Lazar 			return 0;
316*36a02456SLijo Lazar 		}
317467ebfe6SLijo Lazar 	}
318467ebfe6SLijo Lazar 	return adev->reg.smn.get_smn_base(adev, block, die_inst);
319467ebfe6SLijo Lazar }
320467ebfe6SLijo Lazar 
321*36a02456SLijo Lazar uint64_t amdgpu_reg_smn_v1_0_get_base(struct amdgpu_device *adev,
322*36a02456SLijo Lazar 				      enum amd_hw_ip_block_type block,
323*36a02456SLijo Lazar 				      int die_inst)
324*36a02456SLijo Lazar {
325*36a02456SLijo Lazar 	uint64_t smn_base;
326*36a02456SLijo Lazar 
327*36a02456SLijo Lazar 	if (die_inst == 0)
328*36a02456SLijo Lazar 		return 0;
329*36a02456SLijo Lazar 
330*36a02456SLijo Lazar 	switch (block) {
331*36a02456SLijo Lazar 	case XGMI_HWIP:
332*36a02456SLijo Lazar 	case NBIO_HWIP:
333*36a02456SLijo Lazar 	case MP0_HWIP:
334*36a02456SLijo Lazar 	case UMC_HWIP:
335*36a02456SLijo Lazar 	case DF_HWIP:
336*36a02456SLijo Lazar 		smn_base = ((uint64_t)(die_inst & 0x3) << 32) | (1ULL << 34);
337*36a02456SLijo Lazar 		break;
338*36a02456SLijo Lazar 	default:
339*36a02456SLijo Lazar 		dev_warn_once(
340*36a02456SLijo Lazar 			adev->dev,
341*36a02456SLijo Lazar 			"SMN base address query not supported for this block %d\n",
342*36a02456SLijo Lazar 			block);
343*36a02456SLijo Lazar 		smn_base = 0;
344*36a02456SLijo Lazar 		break;
345*36a02456SLijo Lazar 	}
346*36a02456SLijo Lazar 
347*36a02456SLijo Lazar 	return smn_base;
348*36a02456SLijo Lazar }
349*36a02456SLijo Lazar 
35057052d29SLijo Lazar /*
35157052d29SLijo Lazar  * register access helper functions.
35257052d29SLijo Lazar  */
35357052d29SLijo Lazar 
/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
			    uint32_t acc_flags)
{
	uint32_t ret;

	/* Device may be powered down or mid-reset; reads would be bogus. */
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		/*
		 * Under SR-IOV at runtime, registers are read through KIQ
		 * unless the caller opted out with AMDGPU_REGS_NO_KIQ.  The
		 * reset-domain read-lock guards against a concurrent reset
		 * tearing down the KIQ ring; if it is contended, fall back
		 * to a direct MMIO read.
		 */
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		/* Offset beyond the MMIO BAR: use the indirect PCIE path. */
		ret = amdgpu_reg_pcie_rd32(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}
38857052d29SLijo Lazar 
38957052d29SLijo Lazar /*
39057052d29SLijo Lazar  * MMIO register read with bytes helper functions
39157052d29SLijo Lazar  * @offset:bytes offset from MMIO start
39257052d29SLijo Lazar  */
39357052d29SLijo Lazar 
39457052d29SLijo Lazar /**
39557052d29SLijo Lazar  * amdgpu_mm_rreg8 - read a memory mapped IO register
39657052d29SLijo Lazar  *
39757052d29SLijo Lazar  * @adev: amdgpu_device pointer
39857052d29SLijo Lazar  * @offset: byte aligned register offset
39957052d29SLijo Lazar  *
40057052d29SLijo Lazar  * Returns the 8 bit value from the offset specified.
40157052d29SLijo Lazar  */
40257052d29SLijo Lazar uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
40357052d29SLijo Lazar {
40457052d29SLijo Lazar 	if (amdgpu_device_skip_hw_access(adev))
40557052d29SLijo Lazar 		return 0;
40657052d29SLijo Lazar 
40757052d29SLijo Lazar 	if (offset < adev->rmmio_size)
40857052d29SLijo Lazar 		return (readb(adev->rmmio + offset));
40957052d29SLijo Lazar 	BUG();
41057052d29SLijo Lazar }
41157052d29SLijo Lazar 
/**
 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev, uint32_t reg,
				uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t ret, rlcg_flag;

	/* Device may be powered down or mid-reset; reads would be bogus. */
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		/*
		 * On a VF without runtime access, qualifying GC registers
		 * must be read through the RLCG interface for the target XCC.
		 */
		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(
			    adev, acc_flags, GC_HWIP, false, &rlcg_flag)) {
			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag,
						      GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
			   amdgpu_sriov_runtime(adev) &&
			   down_read_trylock(&adev->reset_domain->sem)) {
			/* KIQ path; the sem guards against concurrent reset. */
			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		/* Offset beyond the MMIO BAR: use the indirect PCIE path. */
		ret = amdgpu_reg_pcie_rd32(adev, reg * 4);
	}

	return ret;
}
45157052d29SLijo Lazar 
45257052d29SLijo Lazar /*
45357052d29SLijo Lazar  * MMIO register write with bytes helper functions
45457052d29SLijo Lazar  * @offset:bytes offset from MMIO start
45557052d29SLijo Lazar  * @value: the value want to be written to the register
45657052d29SLijo Lazar  */
45757052d29SLijo Lazar 
45857052d29SLijo Lazar /**
45957052d29SLijo Lazar  * amdgpu_mm_wreg8 - read a memory mapped IO register
46057052d29SLijo Lazar  *
46157052d29SLijo Lazar  * @adev: amdgpu_device pointer
46257052d29SLijo Lazar  * @offset: byte aligned register offset
46357052d29SLijo Lazar  * @value: 8 bit value to write
46457052d29SLijo Lazar  *
46557052d29SLijo Lazar  * Writes the value specified to the offset specified.
46657052d29SLijo Lazar  */
46757052d29SLijo Lazar void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
46857052d29SLijo Lazar {
46957052d29SLijo Lazar 	if (amdgpu_device_skip_hw_access(adev))
47057052d29SLijo Lazar 		return;
47157052d29SLijo Lazar 
47257052d29SLijo Lazar 	if (offset < adev->rmmio_size)
47357052d29SLijo Lazar 		writeb(value, adev->rmmio + offset);
47457052d29SLijo Lazar 	else
47557052d29SLijo Lazar 		BUG();
47657052d29SLijo Lazar }
47757052d29SLijo Lazar 
/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	/* Device may be powered down or mid-reset; drop the write. */
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		/*
		 * Under SR-IOV at runtime, registers are written through KIQ
		 * unless the caller opted out with AMDGPU_REGS_NO_KIQ.  The
		 * reset-domain read-lock guards against a concurrent reset;
		 * if it is contended, fall back to a direct MMIO write.
		 */
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		/* Offset beyond the MMIO BAR: use the indirect PCIE path. */
		amdgpu_reg_pcie_wr32(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
50957052d29SLijo Lazar 
/**
 * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * this function is invoked only for the debugfs register access
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg,
			     uint32_t v, uint32_t xcc_id)
{
	/* Device may be powered down or mid-reset; drop the write. */
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) && adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
		/*
		 * NOTE(review): when RLCG access is available but @reg is
		 * outside the RLCG range, the write is silently dropped
		 * here — confirm this is intended for debugfs access.
		 */
	} else if ((reg * 4) >= adev->rmmio_size) {
		/* Offset beyond the MMIO BAR: use the indirect PCIE path. */
		amdgpu_reg_pcie_wr32(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}
53657052d29SLijo Lazar 
/**
 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev, uint32_t reg,
			    uint32_t v, uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t rlcg_flag;

	/* Device may be powered down or mid-reset; drop the write. */
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		/*
		 * On a VF without runtime access, qualifying GC registers
		 * must be written through the RLCG interface for the target
		 * XCC.
		 */
		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(
			    adev, acc_flags, GC_HWIP, true, &rlcg_flag)) {
			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag,
						GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
			   amdgpu_sriov_runtime(adev) &&
			   down_read_trylock(&adev->reset_domain->sem)) {
			/* KIQ path; the sem guards against concurrent reset. */
			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		/* Offset beyond the MMIO BAR: use the indirect PCIE path. */
		amdgpu_reg_pcie_wr32(adev, reg * 4, v);
	}
}
57557052d29SLijo Lazar 
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	/* The index/data register pair is shared; serialize all users. */
	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	/* Posting read: ensure the index write lands before the data read. */
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);

	return r;
}
60557052d29SLijo Lazar 
/*
 * Read an indirect register through the PCIE index/data pair using a
 * 64-bit @reg_addr; the upper address bits go through a separate
 * index-hi register when needed.
 */
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	/* Early in init nbio callbacks may be absent; use fixed offsets. */
	if (unlikely(!adev->nbio.funcs)) {
		pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
		pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
	} else {
		pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
		pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	}

	/* Only addresses above 4GB need the high index register. */
	if (reg_addr >> 32) {
		if (unlikely(!adev->nbio.funcs))
			pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
		else
			pcie_index_hi =
				adev->nbio.funcs->get_pcie_index_hi_offset(
					adev);
	} else {
		pcie_index_hi = 0;
	}

	/* The index/data register pair is shared; serialize all users. */
	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset =
			(void __iomem *)adev->rmmio + pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	/* Posting reads ensure each index write lands before the data read. */
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);

	return r;
}
65857052d29SLijo Lazar 
65957052d29SLijo Lazar /**
66057052d29SLijo Lazar  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
66157052d29SLijo Lazar  *
66257052d29SLijo Lazar  * @adev: amdgpu_device pointer
66357052d29SLijo Lazar  * @reg_addr: indirect register address to read from
66457052d29SLijo Lazar  *
66557052d29SLijo Lazar  * Returns the value of indirect register @reg_addr
66657052d29SLijo Lazar  */
66757052d29SLijo Lazar u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, u32 reg_addr)
66857052d29SLijo Lazar {
66957052d29SLijo Lazar 	unsigned long flags, pcie_index, pcie_data;
67057052d29SLijo Lazar 	void __iomem *pcie_index_offset;
67157052d29SLijo Lazar 	void __iomem *pcie_data_offset;
67257052d29SLijo Lazar 	u64 r;
67357052d29SLijo Lazar 
67457052d29SLijo Lazar 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
67557052d29SLijo Lazar 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
67657052d29SLijo Lazar 
677b2d55124SLijo Lazar 	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
67857052d29SLijo Lazar 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
67957052d29SLijo Lazar 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
68057052d29SLijo Lazar 
68157052d29SLijo Lazar 	/* read low 32 bits */
68257052d29SLijo Lazar 	writel(reg_addr, pcie_index_offset);
68357052d29SLijo Lazar 	readl(pcie_index_offset);
68457052d29SLijo Lazar 	r = readl(pcie_data_offset);
68557052d29SLijo Lazar 	/* read high 32 bits */
68657052d29SLijo Lazar 	writel(reg_addr + 4, pcie_index_offset);
68757052d29SLijo Lazar 	readl(pcie_index_offset);
68857052d29SLijo Lazar 	r |= ((u64)readl(pcie_data_offset) << 32);
689b2d55124SLijo Lazar 	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);
69057052d29SLijo Lazar 
69157052d29SLijo Lazar 	return r;
69257052d29SLijo Lazar }
69357052d29SLijo Lazar 
/**
 * amdgpu_device_indirect_rreg64_ext - read a 64bits indirect register
 * with a 64-bit register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from, may be above 4GB
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev, u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	/* the high index register is only used when the address is above
	 * 4GB and the ASIC provides the callback
	 */
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi =
			adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset =
			(void __iomem *)adev->rmmio + pcie_index_hi * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);	/* read back to flush the posted write */
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r |= ((u64)readl(pcie_data_offset) << 32);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);

	return r;
}
74357052d29SLijo Lazar 
74457052d29SLijo Lazar /**
74557052d29SLijo Lazar  * amdgpu_device_indirect_wreg - write an indirect register address
74657052d29SLijo Lazar  *
74757052d29SLijo Lazar  * @adev: amdgpu_device pointer
74857052d29SLijo Lazar  * @reg_addr: indirect register offset
74957052d29SLijo Lazar  * @reg_data: indirect register data
75057052d29SLijo Lazar  *
75157052d29SLijo Lazar  */
75257052d29SLijo Lazar void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, u32 reg_addr,
75357052d29SLijo Lazar 				 u32 reg_data)
75457052d29SLijo Lazar {
75557052d29SLijo Lazar 	unsigned long flags, pcie_index, pcie_data;
75657052d29SLijo Lazar 	void __iomem *pcie_index_offset;
75757052d29SLijo Lazar 	void __iomem *pcie_data_offset;
75857052d29SLijo Lazar 
75957052d29SLijo Lazar 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
76057052d29SLijo Lazar 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
76157052d29SLijo Lazar 
762b2d55124SLijo Lazar 	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
76357052d29SLijo Lazar 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
76457052d29SLijo Lazar 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
76557052d29SLijo Lazar 
76657052d29SLijo Lazar 	writel(reg_addr, pcie_index_offset);
76757052d29SLijo Lazar 	readl(pcie_index_offset);
76857052d29SLijo Lazar 	writel(reg_data, pcie_data_offset);
76957052d29SLijo Lazar 	readl(pcie_data_offset);
770b2d55124SLijo Lazar 	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);
77157052d29SLijo Lazar }
77257052d29SLijo Lazar 
/**
 * amdgpu_device_indirect_wreg_ext - write an indirect register with
 * a 64-bit register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset, may be above 4GB
 * @reg_data: indirect register data
 */
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, u64 reg_addr,
				     u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	/* the high index register is only used when the address is above
	 * 4GB and the ASIC provides the callback
	 */
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi =
			adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset =
			(void __iomem *)adev->rmmio + pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);	/* read back to flush the posted write */
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);
}
81357052d29SLijo Lazar 
81457052d29SLijo Lazar /**
81557052d29SLijo Lazar  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
81657052d29SLijo Lazar  *
81757052d29SLijo Lazar  * @adev: amdgpu_device pointer
81857052d29SLijo Lazar  * @reg_addr: indirect register offset
81957052d29SLijo Lazar  * @reg_data: indirect register data
82057052d29SLijo Lazar  *
82157052d29SLijo Lazar  */
82257052d29SLijo Lazar void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, u32 reg_addr,
82357052d29SLijo Lazar 				   u64 reg_data)
82457052d29SLijo Lazar {
82557052d29SLijo Lazar 	unsigned long flags, pcie_index, pcie_data;
82657052d29SLijo Lazar 	void __iomem *pcie_index_offset;
82757052d29SLijo Lazar 	void __iomem *pcie_data_offset;
82857052d29SLijo Lazar 
82957052d29SLijo Lazar 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
83057052d29SLijo Lazar 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
83157052d29SLijo Lazar 
832b2d55124SLijo Lazar 	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
83357052d29SLijo Lazar 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
83457052d29SLijo Lazar 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
83557052d29SLijo Lazar 
83657052d29SLijo Lazar 	/* write low 32 bits */
83757052d29SLijo Lazar 	writel(reg_addr, pcie_index_offset);
83857052d29SLijo Lazar 	readl(pcie_index_offset);
83957052d29SLijo Lazar 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
84057052d29SLijo Lazar 	readl(pcie_data_offset);
84157052d29SLijo Lazar 	/* write high 32 bits */
84257052d29SLijo Lazar 	writel(reg_addr + 4, pcie_index_offset);
84357052d29SLijo Lazar 	readl(pcie_index_offset);
84457052d29SLijo Lazar 	writel((u32)(reg_data >> 32), pcie_data_offset);
84557052d29SLijo Lazar 	readl(pcie_data_offset);
846b2d55124SLijo Lazar 	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);
84757052d29SLijo Lazar }
84857052d29SLijo Lazar 
/**
 * amdgpu_device_indirect_wreg64_ext - write a 64bits indirect register
 * with a 64-bit register address
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset, may be above 4GB
 * @reg_data: indirect register data
 */
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev, u64 reg_addr,
				       u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	/* the high index register is only used when the address is above
	 * 4GB and the ASIC provides the callback
	 */
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi =
			adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset =
			(void __iomem *)adev->rmmio + pcie_index_hi * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);	/* read back to flush the posted write */
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);
}
89857052d29SLijo Lazar 
89957052d29SLijo Lazar u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
90057052d29SLijo Lazar {
90157052d29SLijo Lazar 	unsigned long flags, address, data;
90257052d29SLijo Lazar 	u32 r;
90357052d29SLijo Lazar 
90457052d29SLijo Lazar 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
90557052d29SLijo Lazar 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
90657052d29SLijo Lazar 
907b2d55124SLijo Lazar 	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
90857052d29SLijo Lazar 	WREG32(address, reg * 4);
90957052d29SLijo Lazar 	(void)RREG32(address);
91057052d29SLijo Lazar 	r = RREG32(data);
911b2d55124SLijo Lazar 	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);
91257052d29SLijo Lazar 	return r;
91357052d29SLijo Lazar }
91457052d29SLijo Lazar 
91557052d29SLijo Lazar void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
91657052d29SLijo Lazar {
91757052d29SLijo Lazar 	unsigned long flags, address, data;
91857052d29SLijo Lazar 
91957052d29SLijo Lazar 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
92057052d29SLijo Lazar 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
92157052d29SLijo Lazar 
922b2d55124SLijo Lazar 	spin_lock_irqsave(&adev->reg.pcie.lock, flags);
92357052d29SLijo Lazar 	WREG32(address, reg * 4);
92457052d29SLijo Lazar 	(void)RREG32(address);
92557052d29SLijo Lazar 	WREG32(data, v);
92657052d29SLijo Lazar 	(void)RREG32(data);
927b2d55124SLijo Lazar 	spin_unlock_irqrestore(&adev->reg.pcie.lock, flags);
92857052d29SLijo Lazar }
92957052d29SLijo Lazar 
93057052d29SLijo Lazar uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, uint32_t inst,
93157052d29SLijo Lazar 				    uint32_t reg_addr, char reg_name[],
93257052d29SLijo Lazar 				    uint32_t expected_value, uint32_t mask)
93357052d29SLijo Lazar {
93457052d29SLijo Lazar 	uint32_t ret = 0;
93557052d29SLijo Lazar 	uint32_t old_ = 0;
93657052d29SLijo Lazar 	uint32_t tmp_ = RREG32(reg_addr);
93757052d29SLijo Lazar 	uint32_t loop = adev->usec_timeout;
93857052d29SLijo Lazar 
93957052d29SLijo Lazar 	while ((tmp_ & (mask)) != (expected_value)) {
94057052d29SLijo Lazar 		if (old_ != tmp_) {
94157052d29SLijo Lazar 			loop = adev->usec_timeout;
94257052d29SLijo Lazar 			old_ = tmp_;
94357052d29SLijo Lazar 		} else
94457052d29SLijo Lazar 			udelay(1);
94557052d29SLijo Lazar 		tmp_ = RREG32(reg_addr);
94657052d29SLijo Lazar 		loop--;
94757052d29SLijo Lazar 		if (!loop) {
94857052d29SLijo Lazar 			dev_warn(
94957052d29SLijo Lazar 				adev->dev,
95057052d29SLijo Lazar 				"Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
95157052d29SLijo Lazar 				inst, reg_name, (uint32_t)expected_value,
95257052d29SLijo Lazar 				(uint32_t)(tmp_ & (mask)));
95357052d29SLijo Lazar 			ret = -ETIMEDOUT;
95457052d29SLijo Lazar 			break;
95557052d29SLijo Lazar 		}
95657052d29SLijo Lazar 	}
95757052d29SLijo Lazar 	return ret;
95857052d29SLijo Lazar }
959