xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c (revision e619ac419174fdb6093b9e78b41bb5d0a97de9dd)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include "amdgpu.h"

static const guid_t MCE			= CPER_NOTIFY_MCE;
static const guid_t CMC			= CPER_NOTIFY_CMC;
static const guid_t BOOT		= BOOT_TYPE;

static const guid_t CRASHDUMP		= AMD_CRASHDUMP;
static const guid_t RUNTIME		= AMD_GPU_NONSTANDARD_ERROR;

static void __inc_entry_length(struct cper_hdr *hdr, uint32_t size)
{
	hdr->record_length += size;
}

static void amdgpu_cper_get_timestamp(struct cper_timestamp *timestamp)
{
	struct tm tm;
	time64_t now = ktime_get_real_seconds();

	time64_to_tm(now, 0, &tm);
	timestamp->seconds = tm.tm_sec;
	timestamp->minutes = tm.tm_min;
	timestamp->hours = tm.tm_hour;
	timestamp->flag = 0;
	timestamp->day = tm.tm_mday;
	timestamp->month = 1 + tm.tm_mon;
	timestamp->year = (1900 + tm.tm_year) % 100;
	timestamp->century = (1900 + tm.tm_year) / 100;
}

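/*
 * Fill the common CPER record header: signature, revision, severity,
 * timestamp, record/platform/creator IDs and the notification type that
 * matches the record type, then account for the header length.
 */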
void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
				struct cper_hdr *hdr,
				enum amdgpu_cper_type type,
				enum cper_error_severity sev)
{
	hdr->signature[0]		= 'C';
	hdr->signature[1]		= 'P';
	hdr->signature[2]		= 'E';
	hdr->signature[3]		= 'R';
	hdr->revision			= CPER_HDR_REV_1;
	hdr->signature_end		= 0xFFFFFFFF;
	hdr->error_severity		= sev;

	hdr->valid_bits.platform_id	= 1;
	hdr->valid_bits.partition_id	= 1;
	hdr->valid_bits.timestamp	= 1;

	amdgpu_cper_get_timestamp(&hdr->timestamp);

	snprintf(hdr->record_id, 8, "%d", atomic_inc_return(&adev->cper.unique_id));
	snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
		 adev->pdev->vendor, adev->pdev->device);
	/* pmfw version should be part of creator_id according to CPER spec */
	snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID_AMDGPU);

	switch (type) {
	case AMDGPU_CPER_TYPE_BOOT:
		hdr->notify_type = BOOT;
		break;
	case AMDGPU_CPER_TYPE_FATAL:
	case AMDGPU_CPER_TYPE_BP_THRESHOLD:
		hdr->notify_type = MCE;
		break;
	case AMDGPU_CPER_TYPE_RUNTIME:
		if (sev == CPER_SEV_NON_FATAL_CORRECTED)
			hdr->notify_type = CMC;
		else
			hdr->notify_type = MCE;
		break;
	default:
		dev_err(adev->dev, "Unknown CPER Type\n");
		break;
	}

	__inc_entry_length(hdr, HDR_LEN);
}

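/*
 * Fill one section descriptor: revision, offset and length of the section
 * body, severity, section type GUID, FRU id/text, and the exceed-threshold
 * or latent (poison) flags requested by the caller.
 */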
static int amdgpu_cper_entry_fill_section_desc(struct amdgpu_device *adev,
					       struct cper_sec_desc *section_desc,
					       bool bp_threshold,
					       bool poison,
					       enum cper_error_severity sev,
					       guid_t sec_type,
					       uint32_t section_length,
					       uint32_t section_offset)
{
	section_desc->revision_minor		= CPER_SEC_MINOR_REV_1;
	section_desc->revision_major		= CPER_SEC_MAJOR_REV_22;
	section_desc->sec_offset		= section_offset;
	section_desc->sec_length		= section_length;
	section_desc->valid_bits.fru_id		= 1;
	section_desc->valid_bits.fru_text	= 1;
	section_desc->flag_bits.primary		= 1;
	section_desc->severity			= sev;
	section_desc->sec_type			= sec_type;

	if (adev->smuio.funcs &&
	    adev->smuio.funcs->get_socket_id)
		snprintf(section_desc->fru_text, 20, "OAM%d",
			 adev->smuio.funcs->get_socket_id(adev));
	/* TODO: fru_id is 16 bytes in CPER spec, but driver defines it as 20 bytes */
	snprintf(section_desc->fru_id, 16, "%llx", adev->unique_id);

	if (bp_threshold)
		section_desc->flag_bits.exceed_err_threshold = 1;
	if (poison)
		section_desc->flag_bits.latent_err = 1;

	return 0;
}

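/*
 * Append a crashdump section for a fatal error: point the descriptor at the
 * section body, mark it as a crash register context and copy the raw ACA
 * register dump into it.
 */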
int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
					 struct cper_hdr *hdr,
					 uint32_t idx,
					 struct cper_sec_crashdump_reg_data reg_data)
{
	struct cper_sec_desc *section_desc;
	struct cper_sec_crashdump_fatal *section;

	section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
	section = (struct cper_sec_crashdump_fatal *)((uint8_t *)hdr +
		   FATAL_SEC_OFFSET(hdr->sec_cnt, idx));

	amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, false,
					    CPER_SEV_FATAL, CRASHDUMP, FATAL_SEC_LEN,
					    FATAL_SEC_OFFSET(hdr->sec_cnt, idx));

	section->body.reg_ctx_type = CPER_CTX_TYPE_CRASH;
	section->body.reg_arr_size = sizeof(reg_data);
	section->body.data = reg_data;

	__inc_entry_length(hdr, SEC_DESC_LEN + FATAL_SEC_LEN);

	return 0;
}

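/*
 * Append a non-standard (runtime) error section holding an ACA register
 * dump. Any severity other than corrected is additionally flagged as a
 * latent (poison) error in the section descriptor.
 */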
int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
					   struct cper_hdr *hdr,
					   uint32_t idx,
					   enum cper_error_severity sev,
					   uint32_t *reg_dump,
					   uint32_t reg_count)
{
	struct cper_sec_desc *section_desc;
	struct cper_sec_nonstd_err *section;
	bool poison;

	poison = (sev != CPER_SEV_NON_FATAL_CORRECTED);
	section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
	section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
		   NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));

	amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, poison,
					    sev, RUNTIME, NONSTD_SEC_LEN,
					    NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));

	reg_count = umin(reg_count, CPER_ACA_REG_COUNT);

	section->hdr.valid_bits.err_info_cnt = 1;
	section->hdr.valid_bits.err_context_cnt = 1;

	section->info.error_type = RUNTIME;
	section->info.ms_chk_bits.err_type_valid = 1;
	section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
	section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);

	memcpy(section->ctx.reg_dump, reg_dump, reg_count * sizeof(uint32_t));

	__inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);

	return 0;
}

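/*
 * Append a non-standard section that reports the bad page threshold being
 * exceeded. The register dump is synthesized with fixed values rather than
 * read from an ACA bank.
 */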
int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
						      struct cper_hdr *hdr,
						      uint32_t idx)
{
	struct cper_sec_desc *section_desc;
	struct cper_sec_nonstd_err *section;

	section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
	section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
		   NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));

	amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
					    CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
					    NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));

	section->hdr.valid_bits.err_info_cnt = 1;
	section->hdr.valid_bits.err_context_cnt = 1;

	section->info.error_type = RUNTIME;
	section->info.ms_chk_bits.err_type_valid = 1;
	section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
	section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);

	/* Hardcoded Reg dump for bad page threshold CPER */
	section->ctx.reg_dump[CPER_ACA_REG_CTL_LO]    = 0x1;
	section->ctx.reg_dump[CPER_ACA_REG_CTL_HI]    = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
	section->ctx.reg_dump[CPER_ACA_REG_STATUS_HI] = 0xB0000000;
	section->ctx.reg_dump[CPER_ACA_REG_ADDR_LO]   = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_ADDR_HI]   = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_MISC0_LO]  = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI]  = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
	section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
	section->ctx.reg_dump[CPER_ACA_REG_IPID_LO]   = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_IPID_HI]   = 0x96;
	section->ctx.reg_dump[CPER_ACA_REG_SYND_LO]   = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_SYND_HI]   = 0x0;

	__inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);

	return 0;
}

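/*
 * Allocate a zeroed CPER record large enough for the header, the section
 * descriptors and section_count section bodies of the given type. Returns
 * NULL on unknown type or allocation failure.
 *
 * Callers in this file follow the same basic sequence (see
 * amdgpu_cper_generate_ue_record() for the full version):
 *
 *	hdr = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_FATAL, 1);
 *	amdgpu_cper_entry_fill_hdr(adev, hdr, AMDGPU_CPER_TYPE_FATAL, CPER_SEV_FATAL);
 *	amdgpu_cper_entry_fill_fatal_section(adev, hdr, 0, reg_data);
 *	amdgpu_cper_ring_write(ring, hdr, hdr->record_length);
 */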
struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
					 enum amdgpu_cper_type type,
					 uint16_t section_count)
{
	struct cper_hdr *hdr;
	uint32_t size = 0;

	size += HDR_LEN;
	size += (SEC_DESC_LEN * section_count);

	switch (type) {
	case AMDGPU_CPER_TYPE_RUNTIME:
	case AMDGPU_CPER_TYPE_BP_THRESHOLD:
		size += (NONSTD_SEC_LEN * section_count);
		break;
	case AMDGPU_CPER_TYPE_FATAL:
		size += (FATAL_SEC_LEN * section_count);
		break;
	case AMDGPU_CPER_TYPE_BOOT:
		size += (BOOT_SEC_LEN * section_count);
		break;
	default:
		dev_err(adev->dev, "Unknown CPER Type!\n");
		return NULL;
	}

	hdr = kzalloc(size, GFP_KERNEL);
	if (!hdr)
		return NULL;

	/* Save the section count now; the fill helpers use it to compute section offsets */
	hdr->sec_cnt = section_count;

	return hdr;
}

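/*
 * Build a fatal (UE) CPER record from a single ACA bank and push it to the
 * CPER ring buffer.
 */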
int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
				   struct aca_bank *bank)
{
	struct cper_hdr *fatal = NULL;
	struct cper_sec_crashdump_reg_data reg_data = { 0 };
	struct amdgpu_ring *ring = &adev->cper.ring_buf;
	int ret;

	fatal = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_FATAL, 1);
	if (!fatal) {
		dev_err(adev->dev, "failed to allocate CPER entry for UE record\n");
		return -ENOMEM;
	}

	reg_data.status_lo = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
	reg_data.status_hi = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
	reg_data.addr_lo   = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
	reg_data.addr_hi   = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
	reg_data.ipid_lo   = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
	reg_data.ipid_hi   = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
	reg_data.synd_lo   = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
	reg_data.synd_hi   = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);

	amdgpu_cper_entry_fill_hdr(adev, fatal, AMDGPU_CPER_TYPE_FATAL, CPER_SEV_FATAL);
	ret = amdgpu_cper_entry_fill_fatal_section(adev, fatal, 0, reg_data);
	if (ret) {
		kfree(fatal);
		return ret;
	}

	amdgpu_cper_ring_write(ring, fatal, fatal->record_length);
	/* the record has been copied into the ring buffer, free the entry */
	kfree(fatal);

	return 0;
}

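/*
 * Build a CPER record announcing that the bad page threshold has been
 * exceeded and push it to the CPER ring buffer.
 */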
int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
{
	struct cper_hdr *bp_threshold = NULL;
	struct amdgpu_ring *ring = &adev->cper.ring_buf;
	int ret;

	bp_threshold = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_BP_THRESHOLD, 1);
	if (!bp_threshold) {
		dev_err(adev->dev, "failed to allocate CPER entry for bad page threshold record\n");
		return -ENOMEM;
	}

	amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
	ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
	if (ret) {
		kfree(bp_threshold);
		return ret;
	}

	amdgpu_cper_ring_write(ring, bp_threshold, bp_threshold->record_length);
	kfree(bp_threshold);

	return 0;
}

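/* Map an ACA error type onto the closest CPER severity. */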
static enum cper_error_severity amdgpu_aca_err_type_to_cper_sev(struct amdgpu_device *adev,
								enum aca_error_type aca_err_type)
{
	switch (aca_err_type) {
	case ACA_ERROR_TYPE_UE:
		return CPER_SEV_FATAL;
	case ACA_ERROR_TYPE_CE:
		return CPER_SEV_NON_FATAL_CORRECTED;
	case ACA_ERROR_TYPE_DEFERRED:
		return CPER_SEV_NON_FATAL_UNCORRECTED;
	default:
		dev_err(adev->dev, "Unknown ACA error type!\n");
		return CPER_SEV_FATAL;
	}
}

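/*
 * Build one runtime CPER record containing a section per ACA bank and push
 * it to the CPER ring buffer. The record severity is raised to non-fatal
 * uncorrected if any deferred error is present in the bank list.
 */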
int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
				    struct aca_banks *banks,
				    uint16_t bank_count)
{
	struct cper_hdr *corrected = NULL;
	enum cper_error_severity sev = CPER_SEV_NON_FATAL_CORRECTED;
	struct amdgpu_ring *ring = &adev->cper.ring_buf;
	uint32_t reg_data[CPER_ACA_REG_COUNT] = { 0 };
	struct aca_bank_node *node;
	struct aca_bank *bank;
	uint32_t i = 0;
	int ret;

	corrected = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_RUNTIME, bank_count);
	if (!corrected) {
		dev_err(adev->dev, "failed to allocate CPER entry for CE records\n");
		return -ENOMEM;
	}

	/* Raise severity if any DE is detected in the ACA bank list */
	list_for_each_entry(node, &banks->list, node) {
		bank = &node->bank;
		if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
			sev = CPER_SEV_NON_FATAL_UNCORRECTED;
			break;
		}
	}

	amdgpu_cper_entry_fill_hdr(adev, corrected, AMDGPU_CPER_TYPE_RUNTIME, sev);

	/* Emit one runtime section per ACA bank in the combined CPER record */
	list_for_each_entry(node, &banks->list, node) {
		bank = &node->bank;
		reg_data[CPER_ACA_REG_CTL_LO]    = lower_32_bits(bank->regs[ACA_REG_IDX_CTL]);
		reg_data[CPER_ACA_REG_CTL_HI]    = upper_32_bits(bank->regs[ACA_REG_IDX_CTL]);
		reg_data[CPER_ACA_REG_STATUS_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
		reg_data[CPER_ACA_REG_STATUS_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
		reg_data[CPER_ACA_REG_ADDR_LO]   = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
		reg_data[CPER_ACA_REG_ADDR_HI]   = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
		reg_data[CPER_ACA_REG_MISC0_LO]  = lower_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
		reg_data[CPER_ACA_REG_MISC0_HI]  = upper_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
		reg_data[CPER_ACA_REG_CONFIG_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
		reg_data[CPER_ACA_REG_CONFIG_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
		reg_data[CPER_ACA_REG_IPID_LO]   = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
		reg_data[CPER_ACA_REG_IPID_HI]   = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
		reg_data[CPER_ACA_REG_SYND_LO]   = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
		reg_data[CPER_ACA_REG_SYND_HI]   = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);

		ret = amdgpu_cper_entry_fill_runtime_section(adev, corrected, i++,
				amdgpu_aca_err_type_to_cper_sev(adev, bank->aca_err_type),
				reg_data, CPER_ACA_REG_COUNT);
		if (ret) {
			kfree(corrected);
			return ret;
		}
	}

	amdgpu_cper_ring_write(ring, corrected, corrected->record_length);
	kfree(corrected);

	return 0;
}

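/* Check whether the ring buffer at @pos starts with a CPER record header. */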
static bool amdgpu_cper_is_hdr(struct amdgpu_ring *ring, u64 pos)
{
	struct cper_hdr *chdr;

	chdr = (struct cper_hdr *)&(ring->ring[pos]);
	return !strcmp(chdr->signature, "CPER");
}

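/*
 * Return the number of bytes that can be copied or skipped in one chunk
 * starting at @pos: the remaining length of the CPER record found there
 * (or the gap up to the next record header), clamped to the physical end
 * of the ring buffer.
 */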
static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
{
	struct cper_hdr *chdr;
	u64 p;
	u32 chunk, rec_len = 0;

	chdr = (struct cper_hdr *)&(ring->ring[pos]);
	chunk = ring->ring_size - (pos << 2);

	if (!strcmp(chdr->signature, "CPER")) {
		rec_len = chdr->record_length;
		goto calc;
	}

	/* ring buffer is not full, no cper data after ring->wptr */
	if (ring->count_dw)
		goto calc;

	for (p = pos + 1; p <= ring->buf_mask; p++) {
		chdr = (struct cper_hdr *)&(ring->ring[p]);
		if (!strcmp(chdr->signature, "CPER")) {
			rec_len = (p - pos) << 2;
			goto calc;
		}
	}

calc:
	if (!rec_len)
		return chunk;
	else
		return umin(rec_len, chunk);
}

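/*
 * Copy a CPER record into the ring buffer, wrapping at the end of the ring.
 * If the write overtakes the read pointer, rptr is advanced to the next
 * record header so that readers always start on a valid entry.
 */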
void amdgpu_cper_ring_write(struct amdgpu_ring *ring,
			    void *src, int count)
{
	u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
	int rec_cnt_dw = count >> 2;
	u32 chunk, ent_sz;
	u8 *s = (u8 *)src;

	if (count >= ring->ring_size - 4) {
		dev_err(ring->adev->dev,
			"CPER data size(%d) is larger than ring size(%d)\n",
			count, ring->ring_size - 4);

		return;
	}

	wptr_old = ring->wptr;

	mutex_lock(&ring->adev->cper.ring_lock);
	while (count) {
		ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
		chunk = umin(ent_sz, count);

		memcpy(&ring->ring[ring->wptr], s, chunk);

		ring->wptr += (chunk >> 2);
		ring->wptr &= ring->ptr_mask;
		count -= chunk;
		s += chunk;
	}

	/* the buffer has overflowed, adjust rptr */
	if (((wptr_old < rptr) && (rptr <= ring->wptr)) ||
	    ((ring->wptr < wptr_old) && (wptr_old < rptr)) ||
	    ((rptr <= ring->wptr) && (ring->wptr < wptr_old))) {
		pos = (ring->wptr + 1) & ring->ptr_mask;

		do {
			ent_sz = amdgpu_cper_ring_get_ent_sz(ring, pos);

			rptr += (ent_sz >> 2);
			rptr &= ring->ptr_mask;
			*ring->rptr_cpu_addr = rptr;

			pos = rptr;
		} while (!amdgpu_cper_is_hdr(ring, rptr));
	}
	mutex_unlock(&ring->adev->cper.ring_lock);

	/* "count" was consumed by the copy loop above, use the dword count saved at entry */
	if (ring->count_dw >= rec_cnt_dw)
		ring->count_dw -= rec_cnt_dw;
	else
		ring->count_dw = 0;
}

static u64 amdgpu_cper_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *(ring->rptr_cpu_addr);
}

static u64 amdgpu_cper_ring_get_wptr(struct amdgpu_ring *ring)
{
	return ring->wptr;
}

static const struct amdgpu_ring_funcs cper_ring_funcs = {
	.type = AMDGPU_RING_TYPE_CPER,
	.align_mask = 0xff,
	.support_64bit_ptrs = false,
	.get_rptr = amdgpu_cper_ring_get_rptr,
	.get_wptr = amdgpu_cper_ring_get_wptr,
};

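/*
 * Initialize the CPER ring buffer: a software-managed ring with no doorbell
 * and no scheduler attached, reusing the common amdgpu ring helpers only for
 * buffer management.
 */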
static int amdgpu_cper_ring_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &(adev->cper.ring_buf);

	mutex_init(&adev->cper.ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = false;
	ring->no_scheduler = true;
	ring->funcs = &cper_ring_funcs;

	sprintf(ring->name, "cper");
	return amdgpu_ring_init(adev, ring, CPER_MAX_RING_SIZE, NULL, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

int amdgpu_cper_init(struct amdgpu_device *adev)
{
	mutex_init(&adev->cper.cper_lock);

	adev->cper.enabled = true;
	adev->cper.max_count = CPER_MAX_ALLOWED_COUNT;

	return amdgpu_cper_ring_init(adev);
}

int amdgpu_cper_fini(struct amdgpu_device *adev)
{
	adev->cper.enabled = false;

	amdgpu_ring_fini(&(adev->cper.ring_buf));
	adev->cper.count = 0;
	adev->cper.wptr = 0;

	return 0;
}
559