
// SPDX-License-Identifier: MIT

/*
 * Excerpts from the amdgpu CPER (Common Platform Error Record) code.
 * Elided source lines are marked with ellipsis comments; each fragment
 * is preceded by the name of the function it belongs to.
 */
/* __inc_entry_length() */
	hdr->record_length += size;
/* amdgpu_cper_get_timestamp() */
	timestamp->seconds = tm.tm_sec;
	timestamp->minutes = tm.tm_min;
	timestamp->hours = tm.tm_hour;
	timestamp->flag = 0;
	timestamp->day = tm.tm_mday;
	timestamp->month = 1 + tm.tm_mon;
	timestamp->year = (1900 + tm.tm_year) % 100;
	timestamp->century = (1900 + tm.tm_year) / 100;
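/*
 * Worked example for the split above (illustration only, not driver
 * code): struct tm counts years from 1900, so tm_year == 125 means
 * calendar year 2025, which is stored as century 20, year 25.
 */
#include <assert.h>

static void split_year_example(void)
{
	int tm_year = 125;                    /* 1900 + 125 == 2025 */
	int year = (1900 + tm_year) % 100;    /* -> 25 */
	int century = (1900 + tm_year) / 100; /* -> 20 */

	assert(year == 25 && century == 20);
}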
/* amdgpu_cper_entry_fill_hdr() */
	hdr->signature[0] = 'C';
	hdr->signature[1] = 'P';
	hdr->signature[2] = 'E';
	hdr->signature[3] = 'R';
	hdr->revision = CPER_HDR_REV_1;
	hdr->signature_end = 0xFFFFFFFF;
	hdr->error_severity = sev;

	hdr->valid_bits.platform_id = 1;
	hdr->valid_bits.timestamp = 1;

	amdgpu_cper_get_timestamp(&hdr->timestamp);

	/* ... */
		 (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
			 adev->smuio.funcs->get_socket_id(adev) :
	/* ... */
		 atomic_inc_return(&adev->cper.unique_id));
	memcpy(hdr->record_id, record_id, 8);

	snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
		 adev->pdev->vendor, adev->pdev->device);

	snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID_AMDGPU);

	/* ... */
		hdr->notify_type = BOOT;
	/* ... */
		hdr->notify_type = MCE;
	/* ... */
		hdr->notify_type = CMC;
	/* ... */
		hdr->notify_type = MCE;
	/* ... */
		dev_err(adev->dev, "Unknown CPER Type\n");
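/*
 * The search view elides the control flow around the notify_type
 * assignments above. Based on the notify types assigned and the record
 * kinds handled elsewhere in this file, the branches plausibly form a
 * switch like the following sketch; treat it as a hedged
 * reconstruction, not the verbatim source.
 */
	switch (type) {
	case AMDGPU_CPER_TYPE_BOOT:
		hdr->notify_type = BOOT;
		break;
	case AMDGPU_CPER_TYPE_FATAL:
	case AMDGPU_CPER_TYPE_BP_THRESHOLD:
		hdr->notify_type = MCE;
		break;
	case AMDGPU_CPER_TYPE_RUNTIME:
		/* corrected runtime errors notify as CMC, the rest as MCE */
		hdr->notify_type =
			(sev == CPER_SEV_NON_FATAL_CORRECTED) ? CMC : MCE;
		break;
	default:
		dev_err(adev->dev, "Unknown CPER Type\n");
		break;
	}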
/* amdgpu_cper_entry_fill_section_desc() */
	section_desc->revision_minor = CPER_SEC_MINOR_REV_1;
	section_desc->revision_major = CPER_SEC_MAJOR_REV_22;
	section_desc->sec_offset = section_offset;
	section_desc->sec_length = section_length;
	section_desc->valid_bits.fru_text = 1;
	section_desc->flag_bits.primary = 1;
	section_desc->severity = sev;
	section_desc->sec_type = sec_type;

	snprintf(section_desc->fru_text, 20, "OAM%d",
		 (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
			 adev->smuio.funcs->get_socket_id(adev) :
	/* ... */

	/* ... */
		section_desc->flag_bits.exceed_err_threshold = 1;
	/* ... */
		section_desc->flag_bits.latent_err = 1;
/* amdgpu_cper_entry_fill_fatal_section() */
	struct cper_sec_crashdump_fatal *section;

	section = (struct cper_sec_crashdump_fatal *)((uint8_t *)hdr +
		   FATAL_SEC_OFFSET(hdr->sec_cnt, idx));

	/* ... */
		   FATAL_SEC_OFFSET(hdr->sec_cnt, idx));

	section->body.reg_ctx_type = CPER_CTX_TYPE_CRASH;
	section->body.reg_arr_size = sizeof(reg_data);
	section->body.data = reg_data;
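/*
 * FATAL_SEC_OFFSET() is defined elsewhere; a CPER record is laid out as
 * [header][section descriptors][section bodies], so the offset math for
 * body idx plausibly reduces to the standalone sketch below. The sizes
 * are illustrative assumptions, not the real struct sizes.
 */
static unsigned int fatal_sec_offset_example(unsigned int sec_cnt,
					     unsigned int idx)
{
	const unsigned int hdr_len = 128;  /* assumed header size */
	const unsigned int desc_len = 72;  /* assumed descriptor size */
	const unsigned int body_len = 80;  /* assumed fatal body size */

	/* skip the header, every descriptor, then the preceding bodies */
	return hdr_len + sec_cnt * desc_len + idx * body_len;
}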
/* amdgpu_cper_entry_fill_runtime_section() */
	struct cper_sec_nonstd_err *section;

	section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
		   NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));

	/* ... */
		   NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));

	section->hdr.valid_bits.err_info_cnt = 1;
	section->hdr.valid_bits.err_context_cnt = 1;

	section->info.error_type = RUNTIME;
	section->info.ms_chk_bits.err_type_valid = 1;
	section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
	section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);

	memcpy(section->ctx.reg_dump, reg_dump, reg_count * sizeof(uint32_t));
/* amdgpu_cper_entry_fill_bad_page_threshold_section() */
	struct cper_sec_nonstd_err *section;

	section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
		   NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));

	/* ... */
		   NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));

	section->hdr.valid_bits.err_info_cnt = 1;
	section->hdr.valid_bits.err_context_cnt = 1;

	section->info.error_type = RUNTIME;
	section->info.valid_bits.ms_chk = 1;
	section->info.ms_chk_bits.err_type_valid = 1;
	section->info.ms_chk_bits.err_type = 1;
	section->info.ms_chk_bits.pcc = 1;
	section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
	section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);

	socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
			    adev->smuio.funcs->get_socket_id(adev) :
	/* ... */
	section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
	section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
	section->ctx.reg_dump[CPER_ACA_REG_STATUS_HI] = 0xB0000000;
	section->ctx.reg_dump[CPER_ACA_REG_ADDR_LO] = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_ADDR_HI] = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_MISC0_LO] = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
	section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
	section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
	section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 |
						      (((socket_id % 4) & 0x3) << 12);
	section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
	section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
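/*
 * The two IPID writes above pack the socket id across both halves of
 * the register: socket_id / 4 lands in bit 0 of IPID_LO, and
 * socket_id % 4 lands in bits 13:12 of IPID_HI on top of the 0x096
 * constant. A standalone check (illustration only):
 */
#include <assert.h>
#include <stdint.h>

static void ipid_packing_example(void)
{
	uint32_t socket_id = 6;
	uint32_t ipid_lo = (socket_id / 4) & 0x01;
	uint32_t ipid_hi = 0x096 | (((socket_id % 4) & 0x3) << 12);

	assert(ipid_lo == 0x1);    /* socket 6 is in the second group of 4 */
	assert(ipid_hi == 0x2096); /* 6 % 4 == 2, placed at bits 13:12 */
}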
/* amdgpu_cper_alloc_entry() */
	/* ... */
		dev_err(adev->dev, "Unknown CPER Type!\n");
	/* ... */
	hdr->sec_cnt = section_count;
/* amdgpu_cper_generate_ue_record() */
	struct amdgpu_ring *ring = &adev->cper.ring_buf;

	/* ... */
		dev_err(adev->dev, "fail to alloc cper entry for ue record\n");
		return -ENOMEM;
	/* ... */

	reg_data.status_lo = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
	reg_data.status_hi = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
	reg_data.addr_lo = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
	reg_data.addr_hi = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
	reg_data.ipid_lo = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
	reg_data.ipid_hi = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
	reg_data.synd_lo = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
	reg_data.synd_hi = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);

	/* ... */
	amdgpu_cper_ring_write(ring, fatal, fatal->record_length);
/* amdgpu_cper_generate_bp_threshold_record() */
	struct amdgpu_ring *ring = &adev->cper.ring_buf;

	/* ... */
		dev_err(adev->dev, "fail to alloc cper entry for bad page threshold record\n");
		return -ENOMEM;
	/* ... */

	amdgpu_cper_ring_write(ring, bp_threshold, bp_threshold->record_length);
/* amdgpu_aca_err_type_to_cper_sev() */
	/* ... */
		dev_err(adev->dev, "Unknown ACA error type!\n");
/* amdgpu_cper_generate_ce_records() */
	struct amdgpu_ring *ring = &adev->cper.ring_buf;

	/* ... */
		dev_err(adev->dev, "fail to allocate cper entry for ce records\n");
		return -ENOMEM;
	/* ... */

	list_for_each_entry(node, &banks->list, node) {
		bank = &node->bank;
		if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
	/* ... */

	list_for_each_entry(node, &banks->list, node) {
		bank = &node->bank;
		reg_data[CPER_ACA_REG_CTL_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CTL]);
		reg_data[CPER_ACA_REG_CTL_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CTL]);
		reg_data[CPER_ACA_REG_STATUS_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
		reg_data[CPER_ACA_REG_STATUS_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
		reg_data[CPER_ACA_REG_ADDR_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
		reg_data[CPER_ACA_REG_ADDR_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
		reg_data[CPER_ACA_REG_MISC0_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
		reg_data[CPER_ACA_REG_MISC0_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
		reg_data[CPER_ACA_REG_CONFIG_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
		reg_data[CPER_ACA_REG_CONFIG_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
		reg_data[CPER_ACA_REG_IPID_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
		reg_data[CPER_ACA_REG_IPID_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
		reg_data[CPER_ACA_REG_SYND_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
		reg_data[CPER_ACA_REG_SYND_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);

		/* ... */
			amdgpu_aca_err_type_to_cper_sev(adev, bank->aca_err_type),
		/* ... */
	}

	amdgpu_cper_ring_write(ring, corrected, corrected->record_length);
/* amdgpu_cper_is_hdr() */
	chdr = (struct cper_hdr *)&(ring->ring[pos]);
	return strcmp(chdr->signature, "CPER") ? false : true;
/* amdgpu_cper_ring_get_ent_sz() */
	chdr = (struct cper_hdr *)&(ring->ring[pos]);
	chunk = ring->ring_size - (pos << 2);

	if (!strcmp(chdr->signature, "CPER")) {
		rec_len = chdr->record_length;
	/* ... */

	/* ring buffer is not full, no cper data after ring->wptr */
	if (ring->count_dw)
	/* ... */

	for (p = pos + 1; p <= ring->buf_mask; p++) {
		chdr = (struct cper_hdr *)&(ring->ring[p]);
		if (!strcmp(chdr->signature, "CPER")) {
			rec_len = (p - pos) << 2;
	/* ... */
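/*
 * Standalone sketch of the scan above (illustration only): the ring is
 * addressed in dwords, so an entry's byte length is the dword distance
 * to the next "CPER" signature shifted left by 2. memcmp() is used here
 * because the demo signature window is not NUL-terminated.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void ent_sz_scan_example(void)
{
	uint32_t ring[16] = { 0 };
	unsigned int pos = 3, p;
	unsigned int rec_len = 0;

	memcpy(&ring[3], "CPER", 4);  /* entry under inspection */
	memcpy(&ring[10], "CPER", 4); /* start of the next entry */

	for (p = pos + 1; p < 16; p++) {
		if (!memcmp(&ring[p], "CPER", 4)) {
			rec_len = (p - pos) << 2;
			break;
		}
	}
	assert(rec_len == 28); /* 7 dwords == 28 bytes */
}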
/* amdgpu_cper_ring_write() */
	if (count >= ring->ring_size - 4) {
		dev_err(ring->adev->dev,
	/* ... */
			count, ring->ring_size - 4);
	/* ... */

	mutex_lock(&ring->adev->cper.ring_lock);

	wptr_old = ring->wptr;
	rptr = *ring->rptr_cpu_addr & ring->ptr_mask;

	/* ... */
		ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
	/* ... */
		memcpy(&ring->ring[ring->wptr], s, chunk);

		ring->wptr += (chunk >> 2);
		ring->wptr &= ring->ptr_mask;
		count -= chunk;
	/* ... */

	if (ring->count_dw < rec_cnt_dw)
		ring->count_dw = 0;

	if (((wptr_old < rptr) && (rptr <= ring->wptr)) ||
	    ((ring->wptr < wptr_old) && (wptr_old < rptr)) ||
	    ((rptr <= ring->wptr) && (ring->wptr < wptr_old))) {
		pos = (ring->wptr + 1) & ring->ptr_mask;
	/* ... */

		rptr &= ring->ptr_mask;
		*ring->rptr_cpu_addr = rptr;
	/* ... */

	if (ring->count_dw >= rec_cnt_dw)
		ring->count_dw -= rec_cnt_dw;
	mutex_unlock(&ring->adev->cper.ring_lock);
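/*
 * The three-way condition above detects whether this write overran the
 * reader: in ring order, rptr was overtaken iff it lies in the interval
 * (wptr_old, wptr]. A standalone model with a couple of checks
 * (hypothetical helper name, illustration only):
 */
#include <assert.h>
#include <stdbool.h>

static bool overtook_rptr(unsigned int wptr_old, unsigned int wptr,
			  unsigned int rptr)
{
	return ((wptr_old < rptr) && (rptr <= wptr)) ||
	       ((wptr < wptr_old) && (wptr_old < rptr)) ||
	       ((rptr <= wptr) && (wptr < wptr_old));
}

static void overwrite_check_example(void)
{
	assert(overtook_rptr(2, 7, 5));   /* no wrap, writer passed reader */
	assert(overtook_rptr(14, 3, 15)); /* wrapped and passed reader */
	assert(!overtook_rptr(2, 4, 9));  /* reader untouched */
}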
/* amdgpu_cper_ring_get_rptr() */
	return *(ring->rptr_cpu_addr);
/* amdgpu_cper_ring_get_wptr() */
	return ring->wptr;
/* amdgpu_cper_ring_init() */
	struct amdgpu_ring *ring = &(adev->cper.ring_buf);

	mutex_init(&adev->cper.ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = false;
	ring->no_scheduler = true;
	ring->funcs = &cper_ring_funcs;

	sprintf(ring->name, "cper");
/* amdgpu_cper_init() */
	/* ... */
		dev_err(adev->dev, "failed to initialize cper ring, r = %d\n", r);
	/* ... */

	mutex_init(&adev->cper.cper_lock);

	adev->cper.enabled = true;
	adev->cper.max_count = CPER_MAX_ALLOWED_COUNT;
/* amdgpu_cper_fini() */
	adev->cper.enabled = false;

	amdgpu_ring_fini(&(adev->cper.ring_buf));
	adev->cper.count = 0;
	adev->cper.wptr = 0;
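/*
 * Taken together, the expected lifecycle is: amdgpu_cper_init() sets up
 * the ring and locks, the generate_*() helpers emit records into the
 * ring as ACA banks are reported, and amdgpu_cper_fini() tears it all
 * down. A hedged pseudocode-level sketch (argument lists abbreviated,
 * not verbatim driver code):
 *
 *	amdgpu_cper_init(adev);
 *	...
 *	amdgpu_cper_generate_ue_record(adev, bank);       // fatal UE
 *	amdgpu_cper_generate_ce_records(adev, banks, n);  // CE/deferred
 *	amdgpu_cper_generate_bp_threshold_record(adev);   // bad-page cap
 *	...
 *	amdgpu_cper_fini(adev);
 */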