1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright 2025 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24 #include "ras.h"
25 #include "ras_core_status.h"
26
/* Capacity of each sequence-number kfifo: 128 64-bit sequence numbers. */
#define RAS_SEQNO_FIFO_SIZE (128 * sizeof(uint64_t))

/*
 * Gregorian leap-year test.  The argument is fully parenthesized so that
 * expression arguments (e.g. IS_LEAP_YEAR(y + 1)) evaluate correctly;
 * the previous form expanded `x % 4` textually and mis-bound operators.
 */
#define IS_LEAP_YEAR(x) \
	((((x) % 4 == 0) && ((x) % 100 != 0)) || ((x) % 400 == 0))
30
/*
 * Human-readable names for RAS hardware blocks, indexed by
 * enum ras_block_id (see ras_core_get_ras_block_name()).  The entry
 * order must track that enum.
 */
static const char * const ras_block_name[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
	"ih",
	"mpio",
};
52
ras_core_get_ras_block_name(enum ras_block_id block_id)53 const char *ras_core_get_ras_block_name(enum ras_block_id block_id)
54 {
55 if (block_id >= ARRAY_SIZE(ras_block_name))
56 return "";
57
58 return ras_block_name[block_id];
59 }
60
ras_core_convert_timestamp_to_time(struct ras_core_context * ras_core,uint64_t timestamp,struct ras_time * tm)61 int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
62 uint64_t timestamp, struct ras_time *tm)
63 {
64 int days_in_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
65 uint64_t month = 0, day = 0, hour = 0, minute = 0, second = 0;
66 uint32_t year = 0;
67 int seconds_per_day = 24 * 60 * 60;
68 int seconds_per_hour = 60 * 60;
69 int seconds_per_minute = 60;
70 int days, remaining_seconds;
71
72 days = div64_u64_rem(timestamp, seconds_per_day, (uint64_t *)&remaining_seconds);
73
74 /* utc_timestamp follows the Unix epoch */
75 year = 1970;
76 while (days >= 365) {
77 if (IS_LEAP_YEAR(year)) {
78 if (days < 366)
79 break;
80 days -= 366;
81 } else {
82 days -= 365;
83 }
84 year++;
85 }
86
87 days_in_month[1] += IS_LEAP_YEAR(year);
88
89 month = 0;
90 while (days >= days_in_month[month]) {
91 days -= days_in_month[month];
92 month++;
93 }
94 month++;
95 day = days + 1;
96
97 if (remaining_seconds) {
98 hour = remaining_seconds / seconds_per_hour;
99 minute = (remaining_seconds % seconds_per_hour) / seconds_per_minute;
100 second = remaining_seconds % seconds_per_minute;
101 }
102
103 tm->tm_year = year;
104 tm->tm_mon = month;
105 tm->tm_mday = day;
106 tm->tm_hour = hour;
107 tm->tm_min = minute;
108 tm->tm_sec = second;
109
110 return 0;
111 }
112
ras_core_gpu_in_reset(struct ras_core_context * ras_core)113 bool ras_core_gpu_in_reset(struct ras_core_context *ras_core)
114 {
115 uint32_t status = 0;
116
117 if (ras_core->sys_fn &&
118 ras_core->sys_fn->check_gpu_status)
119 ras_core->sys_fn->check_gpu_status(ras_core, &status);
120
121 return (status & RAS_GPU_STATUS__IN_RESET) ? true : false;
122 }
123
ras_core_gpu_is_vf(struct ras_core_context * ras_core)124 bool ras_core_gpu_is_vf(struct ras_core_context *ras_core)
125 {
126 uint32_t status = 0;
127
128 if (ras_core->sys_fn &&
129 ras_core->sys_fn->check_gpu_status)
130 ras_core->sys_fn->check_gpu_status(ras_core, &status);
131
132 return (status & RAS_GPU_STATUS__IS_VF) ? true : false;
133 }
134
ras_core_gpu_is_rma(struct ras_core_context * ras_core)135 bool ras_core_gpu_is_rma(struct ras_core_context *ras_core)
136 {
137 if (!ras_core)
138 return false;
139
140 return ras_core->is_rma;
141 }
142
ras_core_seqno_fifo_write(struct ras_core_context * ras_core,enum ras_seqno_fifo fifo_type,uint64_t seqno)143 static int ras_core_seqno_fifo_write(struct ras_core_context *ras_core,
144 enum ras_seqno_fifo fifo_type, uint64_t seqno)
145 {
146 int ret = 0;
147 struct kfifo *seqno_fifo = NULL;
148
149 if (fifo_type == SEQNO_FIFO_POISON_CREATION)
150 seqno_fifo = &ras_core->de_seqno_fifo;
151 else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
152 seqno_fifo = &ras_core->consumption_seqno_fifo;
153
154 if (seqno_fifo)
155 ret = kfifo_in_spinlocked(seqno_fifo,
156 &seqno, sizeof(seqno), &ras_core->seqno_lock);
157
158 return ret ? 0 : -EINVAL;
159 }
160
ras_core_seqno_fifo_read(struct ras_core_context * ras_core,enum ras_seqno_fifo fifo_type,uint64_t * seqno,bool pop)161 static int ras_core_seqno_fifo_read(struct ras_core_context *ras_core,
162 enum ras_seqno_fifo fifo_type, uint64_t *seqno, bool pop)
163 {
164 int ret = 0;
165 struct kfifo *seqno_fifo = NULL;
166
167 if (fifo_type == SEQNO_FIFO_POISON_CREATION)
168 seqno_fifo = &ras_core->de_seqno_fifo;
169 else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
170 seqno_fifo = &ras_core->consumption_seqno_fifo;
171
172 if (seqno_fifo) {
173 if (pop)
174 ret = kfifo_out_spinlocked(seqno_fifo,
175 seqno, sizeof(*seqno), &ras_core->seqno_lock);
176 else
177 ret = kfifo_out_peek(seqno_fifo, seqno, sizeof(*seqno));
178 }
179
180 return ret ? 0 : -EINVAL;
181 }
182
ras_core_gen_seqno(struct ras_core_context * ras_core,enum ras_seqno_type type)183 uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
184 enum ras_seqno_type type)
185 {
186 uint64_t seqno = 0;
187
188 if (ras_core->sys_fn &&
189 ras_core->sys_fn->gen_seqno)
190 ras_core->sys_fn->gen_seqno(ras_core, type, &seqno);
191
192 return seqno;
193 }
194
ras_core_put_seqno(struct ras_core_context * ras_core,enum ras_seqno_type seqno_type,uint64_t seqno)195 int ras_core_put_seqno(struct ras_core_context *ras_core,
196 enum ras_seqno_type seqno_type, uint64_t seqno)
197 {
198 int ret = 0;
199
200 if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
201 return -EINVAL;
202
203 if (seqno_type == RAS_SEQNO_TYPE_DE)
204 ret = ras_core_seqno_fifo_write(ras_core,
205 SEQNO_FIFO_POISON_CREATION, seqno);
206 else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
207 ret = ras_core_seqno_fifo_write(ras_core,
208 SEQNO_FIFO_POISON_CONSUMPTION, seqno);
209 else
210 ret = -EINVAL;
211
212 return ret;
213 }
214
ras_core_get_seqno(struct ras_core_context * ras_core,enum ras_seqno_type seqno_type,bool pop)215 uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
216 enum ras_seqno_type seqno_type, bool pop)
217 {
218 uint64_t seq_no;
219 int ret = -ENODATA;
220
221 if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
222 return 0;
223
224 if (seqno_type == RAS_SEQNO_TYPE_DE)
225 ret = ras_core_seqno_fifo_read(ras_core,
226 SEQNO_FIFO_POISON_CREATION, &seq_no, pop);
227 else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
228 ret = ras_core_seqno_fifo_read(ras_core,
229 SEQNO_FIFO_POISON_CONSUMPTION, &seq_no, pop);
230
231 if (ret)
232 seq_no = ras_core_gen_seqno(ras_core, seqno_type);
233
234 return seq_no;
235 }
236
ras_core_eeprom_recovery(struct ras_core_context * ras_core)237 static int ras_core_eeprom_recovery(struct ras_core_context *ras_core)
238 {
239 int count;
240 int ret;
241
242 count = ras_eeprom_get_record_count(ras_core);
243 if (!count)
244 return 0;
245
246 /* Avoid bad page to be loaded again after gpu reset */
247 if (ras_umc_get_saved_eeprom_count(ras_core) >= count)
248 return 0;
249
250 ret = ras_umc_load_bad_pages(ras_core);
251 if (ret) {
252 RAS_DEV_ERR(ras_core->dev, "ras_umc_load_bad_pages failed: %d\n", ret);
253 return ret;
254 }
255
256 ras_eeprom_sync_info(ras_core);
257
258 return ret;
259 }
260
ras_core_create(struct ras_core_config * init_config)261 struct ras_core_context *ras_core_create(struct ras_core_config *init_config)
262 {
263 struct ras_core_context *ras_core;
264 struct ras_core_config *config;
265
266 ras_core = kzalloc_obj(*ras_core);
267 if (!ras_core)
268 return NULL;
269
270 config = kzalloc_obj(*config);
271 if (!config) {
272 kfree(ras_core);
273 return NULL;
274 }
275
276 memcpy(config, init_config, sizeof(*config));
277 ras_core->config = config;
278
279 return ras_core;
280 }
281
ras_core_destroy(struct ras_core_context * ras_core)282 void ras_core_destroy(struct ras_core_context *ras_core)
283 {
284 if (ras_core)
285 kfree(ras_core->config);
286
287 kfree(ras_core);
288 }
289
ras_core_sw_init(struct ras_core_context * ras_core)290 int ras_core_sw_init(struct ras_core_context *ras_core)
291 {
292 int ret;
293
294 if (!ras_core->config) {
295 RAS_DEV_ERR(ras_core->dev, "No ras core config!\n");
296 return -EINVAL;
297 }
298
299 ras_core->sys_fn = ras_core->config->sys_fn;
300 if (!ras_core->sys_fn)
301 return -EINVAL;
302
303 ret = kfifo_alloc(&ras_core->de_seqno_fifo,
304 RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
305 if (ret)
306 return ret;
307
308 ret = kfifo_alloc(&ras_core->consumption_seqno_fifo,
309 RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
310 if (ret)
311 return ret;
312
313 spin_lock_init(&ras_core->seqno_lock);
314
315 ret = ras_aca_sw_init(ras_core);
316 if (ret)
317 return ret;
318
319 ret = ras_umc_sw_init(ras_core);
320 if (ret)
321 return ret;
322
323 ret = ras_cmd_init(ras_core);
324 if (ret)
325 return ret;
326
327 ret = ras_log_ring_sw_init(ras_core);
328 if (ret)
329 return ret;
330
331 ret = ras_psp_sw_init(ras_core);
332 if (ret)
333 return ret;
334
335 return 0;
336 }
337
ras_core_sw_fini(struct ras_core_context * ras_core)338 int ras_core_sw_fini(struct ras_core_context *ras_core)
339 {
340 kfifo_free(&ras_core->de_seqno_fifo);
341 kfifo_free(&ras_core->consumption_seqno_fifo);
342
343 ras_psp_sw_fini(ras_core);
344 ras_log_ring_sw_fini(ras_core);
345 ras_cmd_fini(ras_core);
346 ras_umc_sw_fini(ras_core);
347 ras_aca_sw_fini(ras_core);
348
349 return 0;
350 }
351
ras_core_hw_init(struct ras_core_context * ras_core)352 int ras_core_hw_init(struct ras_core_context *ras_core)
353 {
354 int ret;
355
356 ras_core->ras_eeprom_supported =
357 ras_core->config->ras_eeprom_supported;
358
359 ras_core->poison_supported = ras_core->config->poison_supported;
360
361 ret = ras_psp_hw_init(ras_core);
362 if (ret)
363 return ret;
364
365 ret = ras_aca_hw_init(ras_core);
366 if (ret)
367 goto init_err1;
368
369 ret = ras_mp1_hw_init(ras_core);
370 if (ret)
371 goto init_err2;
372
373 ret = ras_nbio_hw_init(ras_core);
374 if (ret)
375 goto init_err3;
376
377 ret = ras_umc_hw_init(ras_core);
378 if (ret)
379 goto init_err4;
380
381 ret = ras_gfx_hw_init(ras_core);
382 if (ret)
383 goto init_err5;
384
385 ret = ras_eeprom_hw_init(ras_core);
386 if (ret)
387 goto init_err6;
388
389 ret = ras_core_eeprom_recovery(ras_core);
390 if (ret) {
391 RAS_DEV_ERR(ras_core->dev,
392 "Failed to recovery ras core, ret:%d\n", ret);
393 goto init_err6;
394 }
395
396 ret = ras_eeprom_check_storage_status(ras_core);
397 if (ret)
398 goto init_err6;
399
400 ret = ras_process_init(ras_core);
401 if (ret)
402 goto init_err7;
403
404 ras_core->is_initialized = true;
405
406 return 0;
407
408 init_err7:
409 ras_eeprom_hw_fini(ras_core);
410 init_err6:
411 ras_gfx_hw_fini(ras_core);
412 init_err5:
413 ras_umc_hw_fini(ras_core);
414 init_err4:
415 ras_nbio_hw_fini(ras_core);
416 init_err3:
417 ras_mp1_hw_fini(ras_core);
418 init_err2:
419 ras_aca_hw_fini(ras_core);
420 init_err1:
421 ras_psp_hw_fini(ras_core);
422 return ret;
423 }
424
ras_core_hw_fini(struct ras_core_context * ras_core)425 int ras_core_hw_fini(struct ras_core_context *ras_core)
426 {
427 ras_core->is_initialized = false;
428
429 ras_process_fini(ras_core);
430 ras_eeprom_hw_fini(ras_core);
431 ras_gfx_hw_fini(ras_core);
432 ras_nbio_hw_fini(ras_core);
433 ras_umc_hw_fini(ras_core);
434 ras_mp1_hw_fini(ras_core);
435 ras_aca_hw_fini(ras_core);
436 ras_psp_hw_fini(ras_core);
437
438 return 0;
439 }
440
/* Delegate an NBIO interrupt straight to the NBIO error handler. */
bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data)
{
	return ras_nbio_handle_irq_error(ras_core, data);
}
445
ras_core_handle_fatal_error(struct ras_core_context * ras_core)446 int ras_core_handle_fatal_error(struct ras_core_context *ras_core)
447 {
448 int ret = 0;
449
450 ras_aca_mark_fatal_flag(ras_core);
451
452 ret = ras_core_event_notify(ras_core,
453 RAS_EVENT_ID__FATAL_ERROR_DETECTED, NULL);
454
455 return ret;
456 }
457
ras_core_get_curr_nps_mode(struct ras_core_context * ras_core)458 uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core)
459 {
460 if (ras_core->ras_nbio.ip_func &&
461 ras_core->ras_nbio.ip_func->get_memory_partition_mode)
462 return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core);
463
464 RAS_DEV_ERR(ras_core->dev, "Failed to get gpu memory nps mode!\n");
465 return 0;
466 }
467
ras_core_update_ecc_info(struct ras_core_context * ras_core)468 int ras_core_update_ecc_info(struct ras_core_context *ras_core)
469 {
470 int ret;
471
472 ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__CE, NULL);
473 if (!ret)
474 ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL);
475
476 return ret;
477 }
478
ras_core_query_block_ecc_data(struct ras_core_context * ras_core,enum ras_block_id block,struct ras_ecc_count * ecc_count)479 int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
480 enum ras_block_id block, struct ras_ecc_count *ecc_count)
481 {
482 int ret;
483
484 if (!ecc_count || (block >= RAS_BLOCK_ID__LAST) || !ras_core)
485 return -EINVAL;
486
487 ret = ras_aca_get_block_ecc_count(ras_core, block, ecc_count);
488 if (!ret)
489 ras_aca_clear_block_new_ecc_count(ras_core, block);
490
491 return ret;
492 }
493
ras_core_set_status(struct ras_core_context * ras_core,bool enable)494 int ras_core_set_status(struct ras_core_context *ras_core, bool enable)
495 {
496 ras_core->ras_core_enabled = enable;
497
498 return 0;
499 }
500
ras_core_is_enabled(struct ras_core_context * ras_core)501 bool ras_core_is_enabled(struct ras_core_context *ras_core)
502 {
503 return ras_core->ras_core_enabled;
504 }
505
ras_core_get_utc_second_timestamp(struct ras_core_context * ras_core)506 uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core)
507 {
508 if (ras_core && ras_core->sys_fn &&
509 ras_core->sys_fn->get_utc_second_timestamp)
510 return ras_core->sys_fn->get_utc_second_timestamp(ras_core);
511
512 RAS_DEV_ERR(ras_core->dev, "Failed to get system timestamp!\n");
513 return 0;
514 }
515
ras_core_translate_soc_pa_and_bank(struct ras_core_context * ras_core,uint64_t * soc_pa,struct umc_bank_addr * bank_addr,bool bank_to_pa)516 int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
517 uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
518 {
519 if (!ras_core || !soc_pa || !bank_addr)
520 return -EINVAL;
521
522 return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, bank_addr, bank_to_pa);
523 }
524
ras_core_ras_interrupt_detected(struct ras_core_context * ras_core)525 bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core)
526 {
527 if (ras_core && ras_core->sys_fn &&
528 ras_core->sys_fn->detect_ras_interrupt)
529 return ras_core->sys_fn->detect_ras_interrupt(ras_core);
530
531 RAS_DEV_ERR(ras_core->dev, "Failed to detect ras interrupt!\n");
532 return false;
533 }
534
ras_core_get_gpu_mem(struct ras_core_context * ras_core,enum gpu_mem_type mem_type,struct gpu_mem_block * gpu_mem)535 int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
536 enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
537 {
538 if (ras_core->sys_fn && ras_core->sys_fn->get_gpu_mem)
539 return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem);
540
541 RAS_DEV_ERR(ras_core->dev, "Not config get gpu memory API!\n");
542 return -EACCES;
543 }
544
ras_core_put_gpu_mem(struct ras_core_context * ras_core,enum gpu_mem_type mem_type,struct gpu_mem_block * gpu_mem)545 int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
546 enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
547 {
548 if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem)
549 return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem);
550
551 RAS_DEV_ERR(ras_core->dev, "Not config put gpu memory API!!\n");
552 return -EACCES;
553 }
554
ras_core_is_ready(struct ras_core_context * ras_core)555 bool ras_core_is_ready(struct ras_core_context *ras_core)
556 {
557 return ras_core ? ras_core->is_initialized : false;
558 }
559
/* Thin wrapper over the eeprom layer's safety-watermark check. */
bool ras_core_check_safety_watermark(struct ras_core_context *ras_core)
{
	return ras_eeprom_check_safety_watermark(ras_core);
}
564
ras_core_down_trylock_gpu_reset_lock(struct ras_core_context * ras_core)565 int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core)
566 {
567 if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
568 return ras_core->sys_fn->gpu_reset_lock(ras_core, true, true);
569
570 return 1;
571 }
572
ras_core_down_gpu_reset_lock(struct ras_core_context * ras_core)573 void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core)
574 {
575 if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
576 ras_core->sys_fn->gpu_reset_lock(ras_core, true, false);
577 }
578
ras_core_up_gpu_reset_lock(struct ras_core_context * ras_core)579 void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core)
580 {
581 if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
582 ras_core->sys_fn->gpu_reset_lock(ras_core, false, false);
583 }
584
ras_core_event_notify(struct ras_core_context * ras_core,enum ras_notify_event event_id,void * data)585 int ras_core_event_notify(struct ras_core_context *ras_core,
586 enum ras_notify_event event_id, void *data)
587 {
588 if (ras_core && ras_core->sys_fn &&
589 ras_core->sys_fn->ras_notifier)
590 return ras_core->sys_fn->ras_notifier(ras_core, event_id, data);
591
592 return -RAS_CORE_NOT_SUPPORTED;
593 }
594
ras_core_get_device_system_info(struct ras_core_context * ras_core,struct device_system_info * dev_info)595 int ras_core_get_device_system_info(struct ras_core_context *ras_core,
596 struct device_system_info *dev_info)
597 {
598 if (ras_core && ras_core->sys_fn &&
599 ras_core->sys_fn->get_device_system_info)
600 return ras_core->sys_fn->get_device_system_info(ras_core, dev_info);
601
602 return -RAS_CORE_NOT_SUPPORTED;
603 }
604
ras_core_convert_soc_pa_to_cur_nps_pages(struct ras_core_context * ras_core,uint64_t soc_pa,uint64_t * page_pfn,uint32_t max_pages)605 int ras_core_convert_soc_pa_to_cur_nps_pages(struct ras_core_context *ras_core,
606 uint64_t soc_pa, uint64_t *page_pfn, uint32_t max_pages)
607 {
608 struct eeprom_umc_record record;
609 uint32_t cur_nps_mode;
610 int count = 0;
611
612 if (!ras_core || !page_pfn || !max_pages)
613 return -EINVAL;
614
615 cur_nps_mode = ras_core_get_curr_nps_mode(ras_core);
616 if (!cur_nps_mode || cur_nps_mode > UMC_MEMORY_PARTITION_MODE_NPS8)
617 return -EINVAL;
618
619 memset(&record, 0, sizeof(record));
620 record.cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(soc_pa);
621
622 count = ras_umc_convert_record_to_nps_pages(ras_core,
623 &record, cur_nps_mode, page_pfn, max_pages);
624
625 return count;
626 }
627