// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
#include "mmsch_v5_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"

static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state);
static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);

static int amdgpu_ih_srcid_jpeg[] = {
	VCN_5_0__SRCID__JPEG_DECODE,
	VCN_5_0__SRCID__JPEG1_DECODE,
	VCN_5_0__SRCID__JPEG2_DECODE,
	VCN_5_0__SRCID__JPEG3_DECODE,
	VCN_5_0__SRCID__JPEG4_DECODE,
	VCN_5_0__SRCID__JPEG5_DECODE,
	VCN_5_0__SRCID__JPEG6_DECODE,
	VCN_5_0__SRCID__JPEG7_DECODE,
	VCN_5_0__SRCID__JPEG8_DECODE,
	VCN_5_0__SRCID__JPEG9_DECODE,
};

static const struct amdgpu_hwip_reg_entry jpeg_reg_list_5_0_1[] = {
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_STATUS),
};

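/*
 * Per-pipe JRBC register offset, relative to the JRBC0 aperture used with
 * SOC15_REG_OFFSET(). Pipes 0-7 and pipes 8-9 presumably sit in separate
 * register banks, hence the two base deltas (values taken from the VCN 5.0
 * register headers).
 */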
static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
{
	if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
		return ((0x40 * pipe) - 0xc80);
	else
		return ((0x40 * pipe) - 0x440);
}

/**
 * jpeg_v5_0_1_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (!adev->jpeg.num_jpeg_inst || adev->jpeg.num_jpeg_inst > AMDGPU_MAX_JPEG_INSTANCES)
		return -ENOENT;

	adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
	jpeg_v5_0_1_set_dec_ring_funcs(adev);
	jpeg_v5_0_1_set_irq_funcs(adev);
	jpeg_v5_0_1_set_ras_funcs(adev);

	return 0;
}

/**
 * jpeg_v5_0_1_sw_init - sw init for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
		if (r)
			return r;
	}
	/* JPEG DJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	/* JPEG EJPEG POISON EVENT */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			ring->use_doorbell = true;
			ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
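			/*
			 * Doorbell layout differs between bare metal and
			 * SR-IOV: on bare metal each JPEG instance gets a
			 * block of 11 doorbells after the VCN ring doorbell
			 * pair, while under SR-IOV the host is assumed to
			 * reserve a 32-doorbell stride per instance, as the
			 * index math below reflects.
			 */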
			if (!amdgpu_sriov_vf(adev)) {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					1 + j + 11 * jpeg_inst;
			} else {
				ring->doorbell_index =
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					2 + j + 32 * jpeg_inst;
			}
			sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;

			adev->jpeg.internal.jpeg_pitch[j] =
				regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
			adev->jpeg.inst[i].external.jpeg_pitch[j] =
				SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC_SCRATCH0,
						  (j ? jpeg_v5_0_1_core_reg_offset(j) : 0));
		}
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
		r = amdgpu_jpeg_ras_sw_init(adev);
		if (r) {
			dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
			return r;
		}
	}

	r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
	if (r)
		return r;

	adev->jpeg.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);

	return r;
}

/**
 * jpeg_v5_0_1_sw_fini - sw fini for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	amdgpu_jpeg_sysfs_reset_mask_fini(adev);

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v5_0_1_hw_init - start and test JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 */
static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r, jpeg_inst;

	if (amdgpu_sriov_vf(adev)) {
		r = jpeg_v5_0_1_start_sriov(adev);
		if (r)
			return r;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
			for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
				ring = &adev->jpeg.inst[i].ring_dec[j];
				ring->wptr = 0;
				ring->wptr_old = 0;
				jpeg_v5_0_1_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}
		}
		return 0;
	}
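	/*
	 * Bit 8 of VCN_RRMT_CNTL is assumed to report that register
	 * routing (RRMT) is active on this configuration.
	 */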
	if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
		adev->jpeg.caps |= AMDGPU_JPEG_CAPS(RRMT_ENABLED);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_inst = GET_INST(JPEG, i);
		ring = adev->jpeg.inst[i].ring_dec;
		if (ring->use_doorbell)
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * jpeg_inst,
				adev->jpeg.inst[i].aid_id);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			if (ring->use_doorbell)
				WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
						    ring->pipe,
						    ring->doorbell_index <<
						    VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
						    VCN_JPEG_DB_CTRL__EN_MASK);
			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (!amdgpu_sriov_vf(adev)) {
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
			ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
		amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);

	return ret;
}

/**
 * jpeg_v5_0_1_suspend - suspend JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = jpeg_v5_0_1_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v5_0_1_resume - resume JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v5_0_1_hw_init(ip_block);

	return r;
}

static void jpeg_v5_0_1_init_inst(struct amdgpu_device *adev, int i)
{
	int jpeg_inst = GET_INST(JPEG, i);

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);
}

static void jpeg_v5_0_1_deinit_inst(struct amdgpu_device *adev, int i)
{
	int jpeg_inst = GET_INST(JPEG, i);
	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
		 UVD_JMI_CNTL__SOFT_RESET_MASK,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
		 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
}

static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 reg, data, mask;
	int jpeg_inst = GET_INST(JPEG, ring->me);
	int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;

	/* enable System Interrupt for JRBC */
	reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
	if (ring->pipe < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
		data = JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe;
		mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe);
		WREG32_P(reg, data, mask);
	} else {
		data = JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe + 12);
		mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe + 12));
		WREG32_P(reg, data, mask);
	}

	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_VMID,
			    reg_offset, 0);
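	/*
	 * 0x1 | 0x2 presumably correspond to RB_NO_FETCH | RB_RPTR_WR_EN as
	 * in earlier JPEG revisions: hold off fetching while the ring base,
	 * pointers and size are programmed, then clear RB_NO_FETCH below.
	 */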
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_CNTL,
			    reg_offset,
			    (0x00000001L | 0x00000002L));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			    reg_offset, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			    reg_offset, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_RPTR,
			    reg_offset, 0);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_WPTR,
			    reg_offset, 0);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_CNTL,
			    reg_offset, 0x00000002L);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JRBC_RB_SIZE,
			    reg_offset, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
					 reg_offset);
}

static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw, item_offset;
	uint32_t init_status;
	int i, j, jpeg_inst;

	struct mmsch_v5_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v5_0_cmd_end end = { {0} };
	struct mmsch_v5_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		jpeg_inst = GET_INST(JPEG, i);

		memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
		header.version = MMSCH_VERSION;
		header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;

		table_loc = (uint32_t *)table->cpu_addr;
		table_loc += header.total_size;

		item_offset = header.total_size;

		for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			table_size = 0;

			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
			tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
			MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);

			if (j < 5) {
				header.mjpegdec0[j].table_offset = item_offset;
				header.mjpegdec0[j].init_status = 0;
				header.mjpegdec0[j].table_size = table_size;
			} else {
				header.mjpegdec1[j - 5].table_offset = item_offset;
				header.mjpegdec1[j - 5].init_status = 0;
				header.mjpegdec1[j - 5].table_size = table_size;
			}
			header.total_size += table_size;
			item_offset += table_size;
		}

		MMSCH_V5_0_INSERT_END();

		/* send init table to MMSCH */
		size = sizeof(struct mmsch_v5_0_init_header);
		table_loc = (uint32_t *)table->cpu_addr;
		memcpy((void *)table_loc, &header, size);

		ctx_addr = table->gpu_addr;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

		tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);

		size = header.total_size;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);

		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);

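		/*
		 * Kick the MMSCH via the host mailbox and poll the response
		 * register; 0x00000001 is assumed to request execution of the
		 * init table, acknowledged with MMSCH_VF_MAILBOX_RESP__OK.
		 */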
		param = 0x00000001;
		WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
		tmp = 0;
		timeout = 1000;
		resp = 0;
		expected = MMSCH_VF_MAILBOX_RESP__OK;
		init_status =
			((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
		while (resp != expected) {
			resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);

			if (resp != 0)
				break;
			udelay(10);
			tmp = tmp + 10;
			if (tmp >= timeout) {
				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec waiting for regMMSCH_VF_MAILBOX_RESP (expected=0x%08x, readback=0x%08x)\n",
					  tmp, expected, resp);
				return -EBUSY;
			}
		}
		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
		    init_status != MMSCH_VF_ENGINE_STATUS__PASS)
			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
				  resp, init_status);
	}
	return 0;
}

/**
 * jpeg_v5_0_1_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		jpeg_v5_0_1_init_inst(adev, i);
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			jpeg_v5_0_1_init_jrbc(ring);
		}
	}

	return 0;
}

/**
 * jpeg_v5_0_1_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		jpeg_v5_0_1_deinit_inst(adev, i);

	return 0;
}

/**
 * jpeg_v5_0_1_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v5_0_1_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_RPTR,
				   ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
}

/**
 * jpeg_v5_0_1_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v5_0_1_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_WPTR,
				   ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
}

/**
 * jpeg_v5_0_1_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
				    regUVD_JRBC_RB_WPTR,
				    (ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0),
				    lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool ret = true;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);

			ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
				regUVD_JRBC_STATUS, reg_offset) &
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
				UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		}
	}

	return ret;
}

static int jpeg_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);

			ret = SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
							regUVD_JRBC_STATUS, reg_offset,
							UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
							UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	if (!enable)
		return 0;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (!jpeg_v5_0_1_is_idle(ip_block))
			return -EBUSY;
	}

	return 0;
}

static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v5_0_1_stop(adev);
	else
		ret = jpeg_v5_0_1_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int type,
					   enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int type,
					       enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
{
	u32 i, inst;

	i = node_id_to_phys_map[entry->node_id];
	DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
		if (adev->jpeg.inst[inst].aid_id == i)
			break;

	if (inst >= adev->jpeg.num_jpeg_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown JPEG instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_5_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
		break;
	case VCN_5_0__SRCID__JPEG1_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
		break;
	case VCN_5_0__SRCID__JPEG2_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
		break;
	case VCN_5_0__SRCID__JPEG3_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
		break;
	case VCN_5_0__SRCID__JPEG4_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
		break;
	case VCN_5_0__SRCID__JPEG5_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
		break;
	case VCN_5_0__SRCID__JPEG6_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
		break;
	case VCN_5_0__SRCID__JPEG7_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
		break;
	case VCN_5_0__SRCID__JPEG8_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[8]);
		break;
	case VCN_5_0__SRCID__JPEG9_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[9]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

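/*
 * Per-core reset sequence: stall the JMI client and wait for it to drain,
 * drop LMI traffic, pulse the reset bit for this pipe in JPEG_CORE_RST_CTRL,
 * then release the stall/drop controls. The 0x1F values are assumed to
 * cover all JMI client bits.
 */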
static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int jpeg_inst = GET_INST(JPEG, ring->me);
	int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;

	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_UVD_JMI_CLIENT_STALL,
			    reg_offset, 0x1F);
	SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
				  regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
				  reg_offset, 0x1F, 0x1F);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_JPEG_LMI_DROP,
			    reg_offset, 0x1F);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_UVD_JMI_CLIENT_STALL,
			    reg_offset, 0x00);
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
			    regUVD_JMI0_JPEG_LMI_DROP,
			    reg_offset, 0x00);
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}

static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring,
				  unsigned int vmid,
				  struct amdgpu_fence *timedout_fence)
{
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
	jpeg_v5_0_1_core_stall_reset(ring);
	jpeg_v5_0_1_init_jrbc(ring);
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
	.name = "jpeg_v5_0_1",
	.early_init = jpeg_v5_0_1_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v5_0_1_sw_init,
	.sw_fini = jpeg_v5_0_1_sw_fini,
	.hw_init = jpeg_v5_0_1_hw_init,
	.hw_fini = jpeg_v5_0_1_hw_fini,
	.suspend = jpeg_v5_0_1_suspend,
	.resume = jpeg_v5_0_1_resume,
	.is_idle = jpeg_v5_0_1_is_idle,
	.wait_for_idle = jpeg_v5_0_1_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
	.set_powergating_state = jpeg_v5_0_1_set_powergating_state,
	.dump_ip_state = amdgpu_jpeg_dump_ip_state,
	.print_ip_state = amdgpu_jpeg_print_ip_state,
};

static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
	.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
	.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
	.parse_cs = amdgpu_jpeg_dec_parse_cs,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v5_0_1_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v5_0_1_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v5_0_1_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.emit_hdp_flush = jpeg_v4_0_3_ring_emit_hdp_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = jpeg_v5_0_1_ring_reset,
};

static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, jpeg_inst;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v5_0_1_dec_ring_vm_funcs;
			adev->jpeg.inst[i].ring_dec[j].me = i;
			adev->jpeg.inst[i].ring_dec[j].pipe = j;
		}
		jpeg_inst = GET_INST(JPEG, i);
		adev->jpeg.inst[i].aid_id =
			jpeg_inst / adev->jpeg.num_inst_per_aid;
	}
}

static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
	.set = jpeg_v5_0_1_set_interrupt_state,
	.process = jpeg_v5_0_1_process_interrupt,
};

static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = {
	.set = jpeg_v5_0_1_set_ras_interrupt_state,
	.process = amdgpu_jpeg_process_poison_irq,
};

static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
		adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;

	adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;

	adev->jpeg.inst->ras_poison_irq.num_types = 1;
	adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 5,
	.minor = 0,
	.rev = 1,
	.funcs = &jpeg_v5_0_1_ip_funcs,
};

static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
						     uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_JPEG_V5_0_1_JPEG0:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
		break;
	case AMDGPU_JPEG_V5_0_1_JPEG1:
		reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
			 instance, sub_block);

	return poison_stat;
}

static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst = 0, sub = 0, poison_stat = 0;

	for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
		for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

static const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = {
	.query_poison_status = jpeg_v5_0_1_query_ras_poison_status,
};

static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				       enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];
	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
						     1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/* error codes are taken from the SMU driver interface header */
static int jpeg_v5_0_1_err_codes[] = {
	16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-9][S|D] */
	24, 25, 26, 27, 28, 29, 30, 31,
	48, 49, 50, 51,
};

static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					  enum aca_smu_type type, void *data)
{
	u32 instlo;

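	/*
	 * Only banks reported by the AID0 SMU instance are considered; bit 0
	 * of INSTANCEIDLO is assumed not to affect the match and is masked
	 * off before the compare.
	 */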
	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);

	if (instlo != mmSMNAID_AID0_MCA_SMU)
		return false;

	if (aca_bank_check_error_codes(handle->adev, bank,
				       jpeg_v5_0_1_err_codes,
				       ARRAY_SIZE(jpeg_v5_0_1_err_codes)))
		return false;

	return true;
}

static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = {
	.aca_bank_parser = jpeg_v5_0_1_aca_bank_parser,
	.aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid,
};

static const struct aca_info jpeg_v5_0_1_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK,
	.bank_ops = &jpeg_v5_0_1_aca_bank_ops,
};

static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
				&jpeg_v5_0_1_aca_info, NULL);
	if (r)
		goto late_fini;

	if (amdgpu_ras_is_supported(adev, ras_block->block) &&
	    adev->jpeg.inst->ras_poison_irq.funcs) {
		r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
		if (r)
			goto late_fini;
	}

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = {
	.ras_block = {
		.hw_ops = &jpeg_v5_0_1_ras_hw_ops,
		.ras_late_init = jpeg_v5_0_1_ras_late_init,
	},
};

static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.ras = &jpeg_v5_0_1_ras;
}