xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/module.h>
25 
26 #ifdef CONFIG_X86
27 #include <asm/hypervisor.h>
28 #endif
29 
30 #include <drm/drm_drv.h>
31 #include <xen/xen.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_reset.h"
36 #include "amdgpu_dpm.h"
37 #include "vi.h"
38 #include "soc15.h"
39 #include "nv.h"
40 #include "amdgpu_virt_ras_cmd.h"
41 
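/*
 * Record one firmware version in the vf2pf message. The table is indexed
 * by the ucode id itself, so for example
 * POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version)
 * fills both the id and version fields of ucode_info[AMD_SRIOV_UCODE_ID_VCE].
 */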
42 #define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
43 	do { \
44 		vf2pf_info->ucode_info[ucode].id = ucode; \
45 		vf2pf_info->ucode_info[ucode].version = ver; \
46 	} while (0)
47 
48 #define mmRCC_CONFIG_MEMSIZE    0xde3
49 
50 const char *amdgpu_virt_dynamic_crit_table_name[] = {
51 	"IP DISCOVERY",
52 	"VBIOS IMG",
53 	"RAS TELEMETRY",
54 	"DATA EXCHANGE",
55 	"BAD PAGE INFO",
56 	"INIT HEADER",
57 	"LAST",
58 };
59 
60 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
61 {
62 	/* By now all MMIO pages except the mailbox are blocked if
63 	 * blocking is enabled in the hypervisor. Use SCRATCH_REG0 to test.
64 	 */
65 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
66 }
67 
68 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
69 {
70 	struct drm_device *ddev = adev_to_drm(adev);
71 
72 	/* enable virtual display */
73 	if (adev->asic_type != CHIP_ALDEBARAN &&
74 	    adev->asic_type != CHIP_ARCTURUS &&
75 	    ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
76 		if (adev->mode_info.num_crtc == 0)
77 			adev->mode_info.num_crtc = 1;
78 		adev->enable_virtual_display = true;
79 	}
80 	ddev->driver_features &= ~DRIVER_ATOMIC;
81 	adev->cg_flags = 0;
82 	adev->pg_flags = 0;
83 
84 	/* Reduce kcq number to 2 to reduce latency */
85 	if (amdgpu_num_kcq == -1)
86 		amdgpu_num_kcq = 2;
87 }
88 
89 /**
90  * amdgpu_virt_request_full_gpu() - request full gpu access
91  * @adev:	amdgpu device.
92  * @init:	true if called at driver init/fini time.
93  * Full GPU access must be requested before starting driver init/fini.
94  * Return: 0 on success, negative error code otherwise.
95  */
96 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
97 {
98 	struct amdgpu_virt *virt = &adev->virt;
99 	int r;
100 
101 	if (virt->ops && virt->ops->req_full_gpu) {
102 		r = virt->ops->req_full_gpu(adev, init);
103 		if (r) {
104 			adev->no_hw_access = true;
105 			return r;
106 		}
107 
108 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
109 	}
110 
111 	return 0;
112 }
113 
114 /**
115  * amdgpu_virt_release_full_gpu() - release full gpu access
116  * @adev:	amdgpu device.
117  * @init:	true if called at driver init/fini time.
118  * When driver init/fini finishes, full GPU access must be released.
119  * Return: 0 on success, negative error code otherwise.
120  */
121 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
122 {
123 	struct amdgpu_virt *virt = &adev->virt;
124 	int r;
125 
126 	if (virt->ops && virt->ops->rel_full_gpu) {
127 		r = virt->ops->rel_full_gpu(adev, init);
128 		if (r)
129 			return r;
130 
131 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
132 	}
133 	return 0;
134 }
135 
136 /**
137  * amdgpu_virt_reset_gpu() - reset gpu
138  * @adev:	amdgpu device.
139  * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
140  * Return: 0 on success, negative error code otherwise.
141  */
142 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
143 {
144 	struct amdgpu_virt *virt = &adev->virt;
145 	int r;
146 
147 	if (virt->ops && virt->ops->reset_gpu) {
148 		r = virt->ops->reset_gpu(adev);
149 		if (r)
150 			return r;
151 
152 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
153 	}
154 
155 	return 0;
156 }
157 
158 void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
159 {
160 	struct amdgpu_virt *virt = &adev->virt;
161 
162 	if (virt->ops && virt->ops->req_init_data)
163 		virt->ops->req_init_data(adev);
164 
165 	if (adev->virt.req_init_data_ver > 0)
166 		dev_info(adev->dev, "host supports REQ_INIT_DATA handshake, critical_region_version %d\n",
167 				 adev->virt.req_init_data_ver);
168 	else
169 		dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n");
170 }
171 
172 /**
173  * amdgpu_virt_ready_to_reset() - send ready to reset to host
174  * @adev:	amdgpu device.
175  * Send a ready-to-reset message to the GPU hypervisor to signal that we have
176  * stopped GPU activity and are ready for a host-initiated FLR.
177  */
178 void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev)
179 {
180 	struct amdgpu_virt *virt = &adev->virt;
181 
182 	if (virt->ops && virt->ops->ready_to_reset)
183 		virt->ops->ready_to_reset(adev);
184 }
185 
186 /**
187  * amdgpu_virt_wait_reset() - wait for gpu reset to complete
188  * @adev:	amdgpu device.
189  * Wait for the GPU reset to complete.
190  * Return: 0 on success, negative error code otherwise.
191  */
192 int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
193 {
194 	struct amdgpu_virt *virt = &adev->virt;
195 
196 	if (!virt->ops || !virt->ops->wait_reset)
197 		return -EINVAL;
198 
199 	return virt->ops->wait_reset(adev);
200 }
201 
202 /**
203  * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
204  * @adev:	amdgpu device.
205  * The MM table is used by UVD and VCE for their initialization.
206  * Return: 0 on success, negative error code otherwise.
207  */
208 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
209 {
210 	int r;
211 
212 	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
213 		return 0;
214 
215 	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
216 				    AMDGPU_GEM_DOMAIN_VRAM |
217 				    AMDGPU_GEM_DOMAIN_GTT,
218 				    &adev->virt.mm_table.bo,
219 				    &adev->virt.mm_table.gpu_addr,
220 				    (void *)&adev->virt.mm_table.cpu_addr);
221 	if (r) {
222 		dev_err(adev->dev, "failed to alloc mm table, error = %d\n", r);
223 		return r;
224 	}
225 
226 	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
227 	dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n",
228 		 adev->virt.mm_table.gpu_addr,
229 		 adev->virt.mm_table.cpu_addr);
230 	return 0;
231 }
232 
233 /**
234  * amdgpu_virt_free_mm_table() - free mm table memory
235  * @adev:	amdgpu device.
236  * Free MM table memory
237  */
238 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
239 {
240 	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
241 		return;
242 
243 	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
244 			      &adev->virt.mm_table.gpu_addr,
245 			      (void *)&adev->virt.mm_table.cpu_addr);
246 	adev->virt.mm_table.gpu_addr = 0;
247 }
248 
249 /**
250  * amdgpu_virt_rcvd_ras_interrupt() - receive ras interrupt
251  * @adev:	amdgpu device.
252  * Check whether the host has sent a RAS error message.
253  * Return: true if a message was received, false otherwise.
254  */
255 bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev)
256 {
257 	struct amdgpu_virt *virt = &adev->virt;
258 
259 	if (!virt->ops || !virt->ops->rcvd_ras_intr)
260 		return false;
261 
262 	return virt->ops->rcvd_ras_intr(adev);
263 }
264 
265 
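/*
 * Byte-wise checksum over @obj seeded with @key. The bytes of the stored
 * @checksum field are subtracted back out, so the same routine serves both
 * generation (pass checksum = 0, as amdgpu_virt_write_vf2pf_data() does)
 * and verification. A minimal verification sketch, mirroring
 * amdgpu_virt_read_pf2vf_data() ('msg' stands for the pf2vf header area
 * and is illustrative only):
 *
 *	checkval = amd_sriov_msg_checksum(msg, msg->size, key, msg->checksum);
 *	if (checkval != msg->checksum)
 *		return -EINVAL;	(message corrupted in transit)
 */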
266 unsigned int amd_sriov_msg_checksum(void *obj,
267 				unsigned long obj_size,
268 				unsigned int key,
269 				unsigned int checksum)
270 {
271 	unsigned int ret = key;
272 	unsigned long i = 0;
273 	unsigned char *pos;
274 
275 	pos = (unsigned char *)obj;
276 	/* calculate checksum */
277 	for (i = 0; i < obj_size; ++i)
278 		ret += *(pos + i);
279 	/* subtract the checksum field itself, since it was included in the sum */
280 	pos = (unsigned char *)&checksum;
281 	for (i = 0; i < sizeof(checksum); ++i)
282 		ret -= *(pos + i);
283 	return ret;
284 }
285 
286 static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
287 {
288 	struct amdgpu_virt *virt = &adev->virt;
289 	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
290 	/* GPU will be marked bad on the host if the bad page count exceeds 10,
291 	 * so allocating 512 entries is more than enough.
292 	 */
293 	unsigned int align_space = 512;
294 	void *bps = NULL;
295 	struct amdgpu_bo **bps_bo = NULL;
296 
297 	*data = kmalloc_obj(struct amdgpu_virt_ras_err_handler_data);
298 	if (!*data)
299 		goto data_failure;
300 
301 	bps = kmalloc_objs(*(*data)->bps, align_space);
302 	if (!bps)
303 		goto bps_failure;
304 
305 	bps_bo = kmalloc_objs(*(*data)->bps_bo, align_space);
306 	if (!bps_bo)
307 		goto bps_bo_failure;
308 
309 	(*data)->bps = bps;
310 	(*data)->bps_bo = bps_bo;
311 	(*data)->count = 0;
312 	(*data)->last_reserved = 0;
313 
314 	virt->ras_init_done = true;
315 
316 	return 0;
317 
318 bps_bo_failure:
319 	kfree(bps);
320 bps_failure:
321 	kfree(*data);
322 data_failure:
323 	return -ENOMEM;
324 }
325 
326 static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
327 {
328 	struct amdgpu_virt *virt = &adev->virt;
329 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
330 	struct amdgpu_bo *bo;
331 	int i;
332 
333 	if (!data)
334 		return;
335 
336 	for (i = data->last_reserved - 1; i >= 0; i--) {
337 		bo = data->bps_bo[i];
338 		if (bo) {
339 			amdgpu_bo_free_kernel(&bo, NULL, NULL); /* clears bo */
340 			data->bps_bo[i] = bo;
341 		}
342 		data->last_reserved = i;
343 	}
344 }
345 
346 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
347 {
348 	struct amdgpu_virt *virt = &adev->virt;
349 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
350 
351 	virt->ras_init_done = false;
352 
353 	if (!data)
354 		return;
355 
356 	amdgpu_virt_ras_release_bp(adev);
357 
358 	kfree(data->bps);
359 	kfree(data->bps_bo);
360 	kfree(data);
361 	virt->virt_eh_data = NULL;
362 }
363 
364 static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
365 		struct eeprom_table_record *bps, int pages)
366 {
367 	struct amdgpu_virt *virt = &adev->virt;
368 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
369 
370 	if (!data)
371 		return;
372 
373 	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
374 	data->count += pages;
375 }
376 
377 static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
378 {
379 	struct amdgpu_virt *virt = &adev->virt;
380 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
381 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
382 	struct ttm_resource_manager *man = &mgr->manager;
383 	struct amdgpu_bo *bo = NULL;
384 	uint64_t bp;
385 	int i;
386 
387 	if (!data)
388 		return;
389 
390 	for (i = data->last_reserved; i < data->count; i++) {
391 		bp = data->bps[i].retired_page;
392 
393 		/* There are two cases of reserve error that should be ignored:
394 		 * 1) a ras bad page has been allocated (used by someone);
395 		 * 2) a ras bad page has been reserved (duplicate error injection
396 		 *    for one page);
397 		 */
398 		if (ttm_resource_manager_used(man)) {
399 			amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
400 				bp << AMDGPU_GPU_PAGE_SHIFT,
401 				AMDGPU_GPU_PAGE_SIZE);
402 			data->bps_bo[i] = NULL;
403 		} else {
404 			if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
405 							AMDGPU_GPU_PAGE_SIZE,
406 							&bo, NULL))
407 				dev_dbg(adev->dev,
408 						"RAS WARN: failed to reserve vram for retired page 0x%llx\n",
409 						bp);
410 			data->bps_bo[i] = bo;
411 		}
412 		data->last_reserved = i + 1;
413 		bo = NULL;
414 	}
415 }
416 
417 static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
418 		uint64_t retired_page)
419 {
420 	struct amdgpu_virt *virt = &adev->virt;
421 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
422 	int i;
423 
424 	if (!data)
425 		return true;
426 
427 	for (i = 0; i < data->count; i++)
428 		if (retired_page == data->bps[i].retired_page)
429 			return true;
430 
431 	return false;
432 }
433 
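/*
 * The host publishes retired pages as a packed array of 64-bit GPU page
 * numbers at @bp_block_offset inside the reserved VRAM region. Each entry
 * is deduplicated against pages already recorded, then appended and
 * reserved so the VRAM manager never hands the bad page out again.
 */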
434 static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
435 		uint64_t bp_block_offset, uint32_t bp_block_size)
436 {
437 	struct eeprom_table_record bp;
438 	uint64_t retired_page;
439 	uint32_t bp_idx, bp_cnt;
440 	void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
441 	void *drv_va = adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].cpu_ptr;
442 	void *vram_usage_va = fw_va ? fw_va : drv_va;
443 
444 	memset(&bp, 0, sizeof(bp));
445 
446 	if (bp_block_size) {
447 		bp_cnt = bp_block_size / sizeof(uint64_t);
448 		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
449 			retired_page = *(uint64_t *)(vram_usage_va +
450 					bp_block_offset + bp_idx * sizeof(uint64_t));
451 			bp.retired_page = retired_page;
452 
453 			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
454 				continue;
455 
456 			amdgpu_virt_ras_add_bps(adev, &bp, 1);
457 
458 			amdgpu_virt_ras_reserve_bps(adev);
459 		}
460 	}
461 }
462 
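/*
 * Parse and validate the pf2vf message the host leaves in reserved VRAM.
 * Both message versions are verified with amd_sriov_msg_checksum(); v2
 * additionally carries feature/register-access flags, multimedia bandwidth
 * limits and RAS capability bits, which are cached in adev->virt.
 */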
463 static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
464 {
465 	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
466 	uint32_t checksum;
467 	uint32_t checkval;
468 
469 	uint32_t i;
470 	uint32_t tmp;
471 
472 	if (adev->virt.fw_reserve.p_pf2vf == NULL)
473 		return -EINVAL;
474 
475 	if (pf2vf_info->size > 1024) {
476 		dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size);
477 		return -EINVAL;
478 	}
479 
480 	switch (pf2vf_info->version) {
481 	case 1:
482 		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
483 		checkval = amd_sriov_msg_checksum(
484 			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
485 			adev->virt.fw_reserve.checksum_key, checksum);
486 		if (checksum != checkval) {
487 			dev_err(adev->dev,
488 				"invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
489 				checksum, checkval);
490 			return -EINVAL;
491 		}
492 
493 		adev->virt.gim_feature =
494 			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
495 		break;
496 	case 2:
497 		/* TODO: missing key, need to add it later */
498 		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
499 		checkval = amd_sriov_msg_checksum(
500 			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
501 			0, checksum);
502 		if (checksum != checkval) {
503 			dev_err(adev->dev,
504 				"invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
505 				checksum, checkval);
506 			return -EINVAL;
507 		}
508 
509 		adev->virt.vf2pf_update_interval_ms =
510 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
511 		adev->virt.gim_feature =
512 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
513 		adev->virt.reg_access =
514 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
515 
516 		adev->virt.decode_max_dimension_pixels = 0;
517 		adev->virt.decode_max_frame_pixels = 0;
518 		adev->virt.encode_max_dimension_pixels = 0;
519 		adev->virt.encode_max_frame_pixels = 0;
520 		adev->virt.is_mm_bw_enabled = false;
521 		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
522 			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
523 			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);
524 
525 			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
526 			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);
527 
528 			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
529 			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);
530 
531 			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
532 			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
533 		}
534 		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
535 			adev->virt.is_mm_bw_enabled = true;
536 
537 		adev->unique_id =
538 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
539 		adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all;
540 		adev->virt.ras_telemetry_en_caps.all =
541 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all;
542 		break;
543 	default:
544 		dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
545 		return -EINVAL;
546 	}
547 
548 	/* clamp out-of-range interval values to the 2000 ms default */
549 	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
550 		adev->virt.vf2pf_update_interval_ms = 2000;
551 
552 	return 0;
553 }
554 
555 static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
556 {
557 	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
558 	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
559 
560 	if (adev->virt.fw_reserve.p_vf2pf == NULL)
561 		return;
562 
563 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
564 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
565 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
566 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
567 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
568 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
569 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
570 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
571 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
572 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
573 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
574 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
575 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
576 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
577 			    adev->psp.asd_context.bin_desc.fw_version);
578 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
579 			    adev->psp.ras_context.context.bin_desc.fw_version);
580 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
581 			    adev->psp.xgmi_context.context.bin_desc.fw_version);
582 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
583 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
584 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
585 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
586 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
587 }
588 
589 static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
590 {
591 	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
592 
593 	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
594 
595 	if (adev->virt.fw_reserve.p_vf2pf == NULL)
596 		return -EINVAL;
597 
598 	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
599 
600 	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
601 	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
602 
603 #ifdef MODULE
604 	if (THIS_MODULE->version != NULL)
605 		strscpy(vf2pf_info->driver_version, THIS_MODULE->version);
606 	else
607 #endif
608 		strscpy(vf2pf_info->driver_version, "N/A");
609 
610 	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
611 	vf2pf_info->driver_cert = 0;
612 	vf2pf_info->os_info.all = 0;
613 
614 	vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
615 		 ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;
616 	vf2pf_info->fb_vis_usage =
617 		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
618 	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
619 	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
620 
621 	amdgpu_virt_populate_vf2pf_ucode_info(adev);
622 
623 	/* TODO: read dynamic info */
624 	vf2pf_info->gfx_usage = 0;
625 	vf2pf_info->compute_usage = 0;
626 	vf2pf_info->encode_usage = 0;
627 	vf2pf_info->decode_usage = 0;
628 
629 	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
630 	if (amdgpu_sriov_is_mes_info_enable(adev)) {
631 		vf2pf_info->mes_info_addr =
632 			(uint64_t)(adev->mes.resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE);
633 		vf2pf_info->mes_info_size =
634 			adev->mes.resource_1[0]->tbo.base.size - AMDGPU_GPU_PAGE_SIZE;
635 	}
636 	vf2pf_info->checksum =
637 		amd_sriov_msg_checksum(
638 		vf2pf_info, sizeof(*vf2pf_info), 0, 0);
639 
640 	return 0;
641 }
642 
643 static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
644 {
645 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
646 	int ret;
647 
648 	ret = amdgpu_virt_read_pf2vf_data(adev);
649 	if (ret) {
650 		adev->virt.vf2pf_update_retry_cnt++;
651 
652 		if ((amdgpu_virt_rcvd_ras_interrupt(adev) ||
653 			adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
654 			amdgpu_sriov_runtime(adev)) {
655 
656 			amdgpu_ras_set_fed(adev, true);
657 			if (amdgpu_reset_domain_schedule(adev->reset_domain,
658 							&adev->kfd.reset_work))
659 				return;
660 			else
661 				dev_err(adev->dev, "Failed to queue reset work at %s\n", __func__);
662 		}
663 
664 		goto out;
665 	}
666 
667 	adev->virt.vf2pf_update_retry_cnt = 0;
668 	amdgpu_virt_write_vf2pf_data(adev);
669 
670 out:
671 	schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
672 }
673 
674 static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data)
675 {
676 	uint32_t dataexchange_offset =
677 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
678 	uint32_t dataexchange_size =
679 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;
680 	uint64_t pos = 0;
681 
682 	dev_info(adev->dev,
683 			"Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
684 			dataexchange_offset, dataexchange_size);
685 
686 	if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) {
687 		dev_err(adev->dev, "Data exchange region not aligned to 4 bytes\n");
688 		return -EINVAL;
689 	}
690 
691 	pos = (uint64_t)dataexchange_offset;
692 	amdgpu_device_vram_access(adev, pos, pfvf_data,
693 			dataexchange_size, false);
694 
695 	return 0;
696 }
697 
698 void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
699 {
700 	if (adev->virt.vf2pf_update_interval_ms != 0) {
701 		dev_info(adev->dev, "clean up the vf2pf work item\n");
702 		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
703 		adev->virt.vf2pf_update_interval_ms = 0;
704 	}
705 }
706 
707 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
708 {
709 	uint32_t *pfvf_data = NULL;
710 	void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
711 	void *drv_va = adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].cpu_ptr;
712 
713 	adev->virt.fw_reserve.p_pf2vf = NULL;
714 	adev->virt.fw_reserve.p_vf2pf = NULL;
715 	adev->virt.vf2pf_update_interval_ms = 0;
716 	adev->virt.vf2pf_update_retry_cnt = 0;
717 
718 	if (fw_va && drv_va) {
719 		dev_warn(adev->dev, "fw_vram and drv_vram should not both be mapped at the same time!\n");
720 	} else if (fw_va || drv_va) {
721 		/* go through this logic at ip_init and reset time to init the workqueue */
722 		amdgpu_virt_exchange_data(adev);
723 
724 		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
725 		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
726 	} else if (adev->bios != NULL) {
727 		/* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
728 		if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
729 			pfvf_data =
730 				kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10,
731 					GFP_KERNEL);
732 			if (!pfvf_data) {
733 				dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n");
734 				return;
735 			}
736 
737 			if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data))
738 				goto free_pfvf_data;
739 
740 			adev->virt.fw_reserve.p_pf2vf =
741 				(struct amd_sriov_msg_pf2vf_info_header *)pfvf_data;
742 
743 			amdgpu_virt_read_pf2vf_data(adev);
744 
745 free_pfvf_data:
746 			kfree(pfvf_data);
747 			pfvf_data = NULL;
748 			adev->virt.fw_reserve.p_pf2vf = NULL;
749 		} else {
750 			adev->virt.fw_reserve.p_pf2vf =
751 				(struct amd_sriov_msg_pf2vf_info_header *)
752 				(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
753 
754 			amdgpu_virt_read_pf2vf_data(adev);
755 		}
756 	}
757 }
758 
759 
760 void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
761 {
762 	uint64_t bp_block_offset = 0;
763 	uint32_t bp_block_size = 0;
764 	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
765 	void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
766 	void *drv_va = adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].cpu_ptr;
767 
768 	if (fw_va || drv_va) {
769 		if (fw_va) {
770 			if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
771 				adev->virt.fw_reserve.p_pf2vf =
772 					(struct amd_sriov_msg_pf2vf_info_header *)
773 					(fw_va +
774 					adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
775 				adev->virt.fw_reserve.p_vf2pf =
776 					(struct amd_sriov_msg_vf2pf_info_header *)
777 					(fw_va +
778 					adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
779 					(AMD_SRIOV_MSG_SIZE_KB << 10));
780 				adev->virt.fw_reserve.ras_telemetry =
781 					(fw_va +
782 					adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
783 			} else {
784 				adev->virt.fw_reserve.p_pf2vf =
785 					(struct amd_sriov_msg_pf2vf_info_header *)
786 					(fw_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
787 				adev->virt.fw_reserve.p_vf2pf =
788 					(struct amd_sriov_msg_vf2pf_info_header *)
789 					(fw_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
790 				adev->virt.fw_reserve.ras_telemetry =
791 					(fw_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
792 			}
793 		} else if (drv_va) {
794 			adev->virt.fw_reserve.p_pf2vf =
795 				(struct amd_sriov_msg_pf2vf_info_header *)
796 				(drv_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
797 			adev->virt.fw_reserve.p_vf2pf =
798 				(struct amd_sriov_msg_vf2pf_info_header *)
799 				(drv_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
800 			adev->virt.fw_reserve.ras_telemetry =
801 				(drv_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
802 		}
803 
804 		amdgpu_virt_read_pf2vf_data(adev);
805 		amdgpu_virt_write_vf2pf_data(adev);
806 
807 		/* bad page handling for version 2 */
808 		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
809 			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
810 
811 			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
812 				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
813 			bp_block_size = pf2vf_v2->bp_block_size;
814 
815 			if (bp_block_size && !adev->virt.ras_init_done)
816 				amdgpu_virt_init_ras_err_handler_data(adev);
817 
818 			if (adev->virt.ras_init_done)
819 				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
820 		}
821 	}
822 }
823 
824 static u32 amdgpu_virt_init_detect_asic(struct amdgpu_device *adev)
825 {
826 	uint32_t reg;
827 
828 	switch (adev->asic_type) {
829 	case CHIP_TONGA:
830 	case CHIP_FIJI:
831 		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
832 		break;
833 	case CHIP_VEGA10:
834 	case CHIP_VEGA20:
835 	case CHIP_NAVI10:
836 	case CHIP_NAVI12:
837 	case CHIP_SIENNA_CICHLID:
838 	case CHIP_ARCTURUS:
839 	case CHIP_ALDEBARAN:
840 	case CHIP_IP_DISCOVERY:
841 		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
842 		break;
843 	default: /* other chips don't support SRIOV */
844 		reg = 0;
845 		break;
846 	}
847 
848 	if (reg & 1)
849 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
850 
851 	if (reg & 0x80000000)
852 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
853 
854 	if (!reg) {
855 		/* passthrough mode excludes sriov mode */
856 		if (is_virtual_machine() && !xen_initial_domain())
857 			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
858 	}
859 
860 	return reg;
861 }
862 
863 static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
864 {
865 	bool is_sriov = false;
866 
867 	/* we have the ability to check now */
868 	if (amdgpu_sriov_vf(adev)) {
869 		is_sriov = true;
870 
871 		switch (adev->asic_type) {
872 		case CHIP_TONGA:
873 		case CHIP_FIJI:
874 			vi_set_virt_ops(adev);
875 			break;
876 		case CHIP_VEGA10:
877 			soc15_set_virt_ops(adev);
878 #ifdef CONFIG_X86
879 			/* do not send GPU_INIT_DATA with MS_HYPERV */
880 			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
881 #endif
882 				/* send a dummy GPU_INIT_DATA request to host on vega10 */
883 				amdgpu_virt_request_init_data(adev);
884 			break;
885 		case CHIP_VEGA20:
886 		case CHIP_ARCTURUS:
887 		case CHIP_ALDEBARAN:
888 			soc15_set_virt_ops(adev);
889 			break;
890 		case CHIP_NAVI10:
891 		case CHIP_NAVI12:
892 		case CHIP_SIENNA_CICHLID:
893 		case CHIP_IP_DISCOVERY:
894 			nv_set_virt_ops(adev);
895 			/* try send GPU_INIT_DATA request to host */
896 			amdgpu_virt_request_init_data(adev);
897 			break;
898 		default: /* other chips don't support SRIOV */
899 			is_sriov = false;
900 			dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type);
901 			break;
902 		}
903 	}
904 
905 	return is_sriov;
906 }
907 
908 static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
909 {
910 	ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
911 	ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
912 	ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
913 
914 	ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
915 			    RATELIMIT_MSG_ON_RELEASE);
916 	ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
917 			    RATELIMIT_MSG_ON_RELEASE);
918 	ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
919 			    RATELIMIT_MSG_ON_RELEASE);
920 
921 	mutex_init(&adev->virt.ras.ras_telemetry_mutex);
922 	mutex_init(&adev->virt.access_req_mutex);
923 
924 	adev->virt.ras.cper_rptr = 0;
925 }
926 
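/*
 * Byte-sum complement over [buf_start, buf_end). The caller covers the init
 * data header starting at its initdata_offset field; note that only the
 * low byte of the 32-bit complement survives the u8 return type.
 */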
927 static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end)
928 {
929 	uint32_t sum = 0;
930 
931 	if (buf_start >= buf_end)
932 		return 0;
933 
934 	for (; buf_start < buf_end; buf_start++)
935 		sum += buf_start[0];
936 
937 	return 0xffffffff - sum;
938 }
939 
940 int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
941 {
942 	struct amd_sriov_msg_init_data_header *init_data_hdr = NULL;
943 	u64 init_hdr_offset = adev->virt.init_data_header.offset;
944 	u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */
945 	u64 vram_size;
946 	u64 end;
947 	int r = 0;
948 	uint8_t checksum = 0;
949 
950 	/* Skip the init below if the critical region version != v2 */
951 	if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2)
952 		return 0;
953 
954 	vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
955 	if (!vram_size || vram_size == U32_MAX)
956 		return -EINVAL;
957 	vram_size <<= 20;
958 
959 	if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) {
960 		dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n");
961 		return -EINVAL;
962 	}
963 
964 	/* Allocate for init_data_hdr */
965 	init_data_hdr = kzalloc_obj(struct amd_sriov_msg_init_data_header);
966 	if (!init_data_hdr)
967 		return -ENOMEM;
968 
969 	amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr,
970 					sizeof(struct amd_sriov_msg_init_data_header), false);
971 
972 	/* Table validation */
973 	if (strncmp(init_data_hdr->signature,
974 				AMDGPU_SRIOV_CRIT_DATA_SIGNATURE,
975 				AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) {
976 		dev_err(adev->dev, "Invalid init data signature: %.4s\n",
977 			init_data_hdr->signature);
978 		r = -EINVAL;
979 		goto out;
980 	}
981 
982 	checksum = amdgpu_virt_crit_region_calc_checksum(
983 			(uint8_t *)&init_data_hdr->initdata_offset,
984 			(uint8_t *)init_data_hdr +
985 			sizeof(struct amd_sriov_msg_init_data_header));
986 	if (checksum != init_data_hdr->checksum) {
987 		dev_err(adev->dev, "Checksum mismatch: calculated 0x%x, init_data reports 0x%x\n",
988 				checksum, init_data_hdr->checksum);
989 		r = -EINVAL;
990 		goto out;
991 	}
992 
993 	memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn));
994 	memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl));
995 
996 	adev->virt.crit_regn.offset = init_data_hdr->initdata_offset;
997 	adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb;
998 
999 	/* Validation and initialization for each table entry */
1000 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) {
1001 		if (!init_data_hdr->ip_discovery_size_in_kb ||
1002 				init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) {
1003 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1004 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID],
1005 				init_data_hdr->ip_discovery_size_in_kb);
1006 			r = -EINVAL;
1007 			goto out;
1008 		}
1009 
1010 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset =
1011 			init_data_hdr->ip_discovery_offset;
1012 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb =
1013 			init_data_hdr->ip_discovery_size_in_kb;
1014 	}
1015 
1016 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) {
1017 		if (!init_data_hdr->vbios_img_size_in_kb) {
1018 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1019 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID],
1020 				init_data_hdr->vbios_img_size_in_kb);
1021 			r = -EINVAL;
1022 			goto out;
1023 		}
1024 
1025 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset =
1026 			init_data_hdr->vbios_img_offset;
1027 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb =
1028 			init_data_hdr->vbios_img_size_in_kb;
1029 	}
1030 
1031 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) {
1032 		if (!init_data_hdr->ras_tele_info_size_in_kb) {
1033 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1034 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID],
1035 				init_data_hdr->ras_tele_info_size_in_kb);
1036 			r = -EINVAL;
1037 			goto out;
1038 		}
1039 
1040 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset =
1041 			init_data_hdr->ras_tele_info_offset;
1042 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb =
1043 			init_data_hdr->ras_tele_info_size_in_kb;
1044 	}
1045 
1046 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) {
1047 		if (!init_data_hdr->dataexchange_size_in_kb) {
1048 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1049 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID],
1050 				init_data_hdr->dataexchange_size_in_kb);
1051 			r = -EINVAL;
1052 			goto out;
1053 		}
1054 
1055 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset =
1056 			init_data_hdr->dataexchange_offset;
1057 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb =
1058 			init_data_hdr->dataexchange_size_in_kb;
1059 	}
1060 
1061 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) {
1062 		if (!init_data_hdr->bad_page_size_in_kb) {
1063 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1064 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID],
1065 				init_data_hdr->bad_page_size_in_kb);
1066 			r = -EINVAL;
1067 			goto out;
1068 		}
1069 
1070 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset =
1071 			init_data_hdr->bad_page_info_offset;
1072 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb =
1073 			init_data_hdr->bad_page_size_in_kb;
1074 	}
1075 
1076 	/* Validation for critical region info */
1077 	if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) {
1078 		dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n",
1079 				adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb);
1080 		r = -EINVAL;
1081 		goto out;
1082 	}
1083 
1084 	/* reserved memory starts at the crit region base offset, with a size of 5MB */
1085 	amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_FW_VRAM_USAGE,
1086 				  adev->virt.crit_regn.offset,
1087 				  adev->virt.crit_regn.size_kb << 10, true);
1088 	dev_info(adev->dev,
1089 		"critical region v%d requested to reserve memory start at %08llx with %llu KB.\n",
1090 			init_data_hdr->version,
1091 			adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].offset,
1092 			adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].size >> 10);
1093 
1094 	adev->virt.is_dynamic_crit_regn_enabled = true;
1095 
1096 out:
1097 	kfree(init_data_hdr);
1098 	init_data_hdr = NULL;
1099 
1100 	return r;
1101 }
1102 
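/*
 * Copy one dynamic critical-region table from VRAM into @binary. A hedged
 * usage sketch (buffer names are illustrative, not from a real caller):
 *
 *	u32 size = adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb << 10;
 *	u8 *buf = kzalloc(size, GFP_KERNEL);
 *
 *	if (buf && !amdgpu_virt_get_dynamic_data_info(adev,
 *			AMD_SRIOV_MSG_IPD_TABLE_ID, buf, &size))
 *		...parse the IP discovery copy in buf...
 *
 * Return: 0 on success, -EINVAL on a bad table id or undersized buffer.
 */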
1103 int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
1104 	int data_id, uint8_t *binary, u32 *size)
1105 {
1106 	uint32_t data_offset = 0;
1107 	uint32_t data_size = 0;
1108 	enum amd_sriov_msg_table_id_enum data_table_id = data_id;
1109 
1110 	if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
1111 		return -EINVAL;
1112 
1113 	data_offset = adev->virt.crit_regn_tbl[data_table_id].offset;
1114 	data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10;
1115 
1116 	/* Validate on input params */
1117 	if (!binary || !size || *size < (uint64_t)data_size)
1118 		return -EINVAL;
1119 
1120 	/* Proceed to copy the dynamic content */
1121 	amdgpu_device_vram_access(adev,
1122 			(uint64_t)data_offset, (uint32_t *)binary, data_size, false);
1123 	*size = (uint64_t)data_size;
1124 
1125 	dev_dbg(adev->dev,
1126 		"Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
1127 		amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size);
1128 
1129 	return 0;
1130 }
1131 
1132 void amdgpu_virt_init(struct amdgpu_device *adev)
1133 {
1134 	bool is_sriov = false;
1135 	uint32_t reg = amdgpu_virt_init_detect_asic(adev);
1136 
1137 	is_sriov = amdgpu_virt_init_req_data(adev, reg);
1138 
1139 	if (is_sriov)
1140 		amdgpu_virt_init_ras(adev);
1141 }
1142 
1143 static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
1144 {
1145 	return amdgpu_sriov_is_debug(adev);
1146 }
1147 
1148 static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
1149 {
1150 	return amdgpu_sriov_is_normal(adev);
1151 }
1152 
1153 int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
1154 {
1155 	if (!amdgpu_sriov_vf(adev) ||
1156 	    amdgpu_virt_access_debugfs_is_kiq(adev))
1157 		return 0;
1158 
1159 	if (amdgpu_virt_access_debugfs_is_mmio(adev))
1160 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
1161 	else
1162 		return -EPERM;
1163 
1164 	return 0;
1165 }
1166 
1167 void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
1168 {
1169 	if (amdgpu_sriov_vf(adev))
1170 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
1171 }
1172 
1173 enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
1174 {
1175 	enum amdgpu_sriov_vf_mode mode;
1176 
1177 	if (amdgpu_sriov_vf(adev)) {
1178 		if (amdgpu_sriov_is_pp_one_vf(adev))
1179 			mode = SRIOV_VF_MODE_ONE_VF;
1180 		else
1181 			mode = SRIOV_VF_MODE_MULTI_VF;
1182 	} else {
1183 		mode = SRIOV_VF_MODE_BARE_METAL;
1184 	}
1185 
1186 	return mode;
1187 }
1188 
1189 void amdgpu_virt_pre_reset(struct amdgpu_device *adev)
1190 {
1191 	/* stop the data exchange thread */
1192 	amdgpu_virt_fini_data_exchange(adev);
1193 	amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR);
1194 }
1195 
1196 void amdgpu_virt_post_reset(struct amdgpu_device *adev)
1197 {
1198 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
1199 		/* force set to GFXOFF state after reset,
1200 		 * to avoid some invalid operation before GC enable
1201 		 */
1202 		adev->gfx.is_poweron = false;
1203 	}
1204 
1205 	adev->mes.ring[0].sched.ready = false;
1206 }
1207 
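/*
 * Decide whether loading @ucode_id should be skipped under SR-IOV.
 * Returning true means the VF skips this firmware, typically because the
 * host loads it on the VF's behalf on the listed IP versions.
 */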
1208 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
1209 {
1210 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1211 	case IP_VERSION(13, 0, 0):
1212 		/* no vf autoload, white list */
1213 		if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
1214 		    ucode_id == AMDGPU_UCODE_ID_VCN)
1215 			return false;
1216 		else
1217 			return true;
1218 	case IP_VERSION(11, 0, 9):
1219 	case IP_VERSION(11, 0, 7):
1220 		/* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
1221 		if (ucode_id == AMDGPU_UCODE_ID_RLC_G
1222 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1223 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1224 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1225 		    || ucode_id == AMDGPU_UCODE_ID_SMC)
1226 			return true;
1227 		else
1228 			return false;
1229 	case IP_VERSION(13, 0, 10):
1230 		/* white list */
1231 		if (ucode_id == AMDGPU_UCODE_ID_CAP
1232 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
1233 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
1234 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
1235 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
1236 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
1237 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
1238 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
1239 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
1240 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
1241 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
1242 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
1243 		|| ucode_id == AMDGPU_UCODE_ID_CP_MES
1244 		|| ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
1245 		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1
1246 		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
1247 		|| ucode_id == AMDGPU_UCODE_ID_VCN1
1248 		|| ucode_id == AMDGPU_UCODE_ID_VCN)
1249 			return false;
1250 		else
1251 			return true;
1252 	default:
1253 		/* legacy black list */
1254 		if (ucode_id == AMDGPU_UCODE_ID_SDMA0
1255 		    || ucode_id == AMDGPU_UCODE_ID_SDMA1
1256 		    || ucode_id == AMDGPU_UCODE_ID_SDMA2
1257 		    || ucode_id == AMDGPU_UCODE_ID_SDMA3
1258 		    || ucode_id == AMDGPU_UCODE_ID_SDMA4
1259 		    || ucode_id == AMDGPU_UCODE_ID_SDMA5
1260 		    || ucode_id == AMDGPU_UCODE_ID_SDMA6
1261 		    || ucode_id == AMDGPU_UCODE_ID_SDMA7
1262 		    || ucode_id == AMDGPU_UCODE_ID_SDMA_RS64
1263 		    || ucode_id == AMDGPU_UCODE_ID_RLC_G
1264 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1265 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1266 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1267 		    || ucode_id == AMDGPU_UCODE_ID_SMC)
1268 			return true;
1269 		else
1270 			return false;
1271 	}
1272 }
1273 
1274 void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
1275 			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
1276 			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
1277 {
1278 	uint32_t i;
1279 
1280 	if (!adev->virt.is_mm_bw_enabled)
1281 		return;
1282 
1283 	if (encode) {
1284 		for (i = 0; i < encode_array_size; i++) {
1285 			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
1286 			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
1287 			if (encode[i].max_width > 0)
1288 				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
1289 			else
1290 				encode[i].max_height = 0;
1291 		}
1292 	}
1293 
1294 	if (decode) {
1295 		for (i = 0; i < decode_array_size; i++) {
1296 			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
1297 			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
1298 			if (decode[i].max_width > 0)
1299 				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
1300 			else
1301 				decode[i].max_height = 0;
1302 		}
1303 	}
1304 }
1305 
1306 bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
1307 						 u32 acc_flags, u32 hwip,
1308 						 bool write, u32 *rlcg_flag)
1309 {
1310 	bool ret = false;
1311 
1312 	switch (hwip) {
1313 	case GC_HWIP:
1314 		if (amdgpu_sriov_reg_indirect_gc(adev)) {
1315 			*rlcg_flag =
1316 				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
1317 			ret = true;
1318 		/* only in new version, AMDGPU_REGS_NO_KIQ and
1319 		 * AMDGPU_REGS_RLC are enabled simultaneously */
1320 		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
1321 				!(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
1322 			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
1323 			ret = true;
1324 		}
1325 		break;
1326 	case MMHUB_HWIP:
1327 		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
1328 		    (acc_flags & AMDGPU_REGS_RLC) && write) {
1329 			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
1330 			ret = true;
1331 		}
1332 		break;
1333 	default:
1334 		break;
1335 	}
1336 	return ret;
1337 }
1338 
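/*
 * VFi indirect register access: the request is staged through a small
 * mailbox-style protocol (vfi_addr/vfi_data/vfi_cmd registers plus a
 * vfi_stat register polled for completion). GRBM_GFX_CNTL/GRBM_GFX_INDEX
 * accesses are not forwarded at all; their values are cached locally and
 * replayed with every request so the RLC applies the right GRBM context.
 */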
1339 static u32 amdgpu_virt_rlcg_vfi_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
1340 {
1341 	uint32_t timeout = 100;
1342 	uint32_t i;
1343 
1344 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1345 	void *vfi_cmd;
1346 	void *vfi_stat;
1347 	void *vfi_addr;
1348 	void *vfi_data;
1349 	void *vfi_grbm_cntl;
1350 	void *vfi_grbm_idx;
1351 	uint32_t cmd;
1352 	uint32_t stat;
1353 	uint32_t addr = offset;
1354 	uint32_t data;
1355 	uint32_t grbm_cntl_data;
1356 	uint32_t grbm_idx_data;
1357 
1358 	unsigned long flags;
1359 	bool is_err = true;
1360 
1361 	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
1362 		dev_err(adev->dev, "VFi interface is not available\n");
1363 		return 0;
1364 	}
1365 
1366 	if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
1367 		dev_err(adev->dev, "VFi invalid XCC, xcc_id=0x%x\n", xcc_id);
1368 		return 0;
1369 	}
1370 
1371 	if (amdgpu_device_skip_hw_access(adev))
1372 		return 0;
1373 
1374 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
1375 	vfi_cmd  = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->vfi_cmd;
1376 	vfi_stat = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->vfi_stat;
1377 	vfi_addr = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->vfi_addr;
1378 	vfi_data = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->vfi_data;
1379 	vfi_grbm_cntl = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->vfi_grbm_cntl;
1380 	vfi_grbm_idx  = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->vfi_grbm_idx;
1381 	grbm_cntl_data = reg_access_ctrl->vfi_grbm_cntl_data;
1382 	grbm_idx_data  = reg_access_ctrl->vfi_grbm_idx_data;
1383 
1384 	if (flag == AMDGPU_RLCG_GC_WRITE) {
1385 		data = v;
1386 		cmd = AMDGPU_RLCG_VFI_CMD__WR;
1387 
1388 		/* GRBM_GFX_CNTL and GRBM_GFX_INDEX are protected by a mutex outside this call */
1389 		if (addr == reg_access_ctrl->grbm_cntl) {
1390 			reg_access_ctrl->vfi_grbm_cntl_data = data;
1391 			return 0;
1392 		} else if (addr == reg_access_ctrl->grbm_idx) {
1393 			reg_access_ctrl->vfi_grbm_idx_data = data;
1394 			return 0;
1395 		}
1396 
1397 	} else if (flag == AMDGPU_RLCG_GC_READ) {
1398 		data = 0;
1399 		cmd = AMDGPU_RLCG_VFI_CMD__RD;
1400 
1401 		/* GRBM_GFX_CNTL and GRBM_GFX_INDEX are protected by a mutex outside this call */
1402 		if (addr == reg_access_ctrl->grbm_cntl)
1403 			return grbm_cntl_data;
1404 		else if (addr == reg_access_ctrl->grbm_idx)
1405 			return grbm_idx_data;
1406 
1407 	} else {
1408 		dev_err(adev->dev, "VFi invalid access, flag=0x%x\n", flag);
1409 		return 0;
1410 	}
1411 
1412 	spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
1413 
1414 	writel(addr, vfi_addr);
1415 	writel(data, vfi_data);
1416 	writel(grbm_cntl_data, vfi_grbm_cntl);
1417 	writel(grbm_idx_data,  vfi_grbm_idx);
1418 
1419 	writel(AMDGPU_RLCG_VFI_STAT__BUSY, vfi_stat);
1420 	writel(cmd, vfi_cmd);
1421 
1422 	for (i = 0; i < timeout; i++) {
1423 		stat = readl(vfi_stat);
1424 		if (stat != AMDGPU_RLCG_VFI_STAT__BUSY)
1425 			break;
1426 		udelay(10);
1427 	}
1428 
1429 	switch (stat) {
1430 	case AMDGPU_RLCG_VFI_STAT__DONE:
1431 		is_err = false;
1432 		if (cmd == AMDGPU_RLCG_VFI_CMD__RD)
1433 			data = readl(vfi_data);
1434 		break;
1435 	case AMDGPU_RLCG_VFI_STAT__BUSY:
1436 		dev_err(adev->dev, "VFi access timeout\n");
1437 		break;
1438 	case AMDGPU_RLCG_VFI_STAT__INV_CMD:
1439 		dev_err(adev->dev, "VFi invalid command\n");
1440 		break;
1441 	case AMDGPU_RLCG_VFI_STAT__INV_ADDR:
1442 		dev_err(adev->dev, "VFi invalid address\n");
1443 		break;
1444 	case AMDGPU_RLCG_VFI_STAT__ERR:
1445 		dev_err(adev->dev, "VFi unknown error\n");
1446 		break;
1447 	default:
1448 		dev_err(adev->dev, "VFi unknown status code\n");
1449 		break;
1450 	}
1451 
1452 	spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
1453 
1454 	if (is_err)
1455 		dev_err(adev->dev, "VFi: [grbm_cntl=0x%x grbm_idx=0x%x] addr=0x%x (byte addr 0x%x), data=0x%x, cmd=0x%x\n",
1456 			grbm_cntl_data, grbm_idx_data,
1457 			addr, addr * 4, data, cmd);
1458 	else
1459 		dev_dbg(adev->dev, "VFi: [grbm_cntl=0x%x grbm_idx=0x%x] addr=0x%x (byte addr 0x%x), data=0x%x, cmd=0x%x\n",
1460 			grbm_cntl_data, grbm_idx_data,
1461 			addr, addr * 4, data, cmd);
1462 
1463 	return data;
1464 }
1465 
1466 u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
1467 {
1468 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1469 	uint32_t timeout = 50000;
1470 	uint32_t i, tmp;
1471 	uint32_t ret = 0;
1472 	void *scratch_reg0;
1473 	void *scratch_reg1;
1474 	void *scratch_reg2;
1475 	void *scratch_reg3;
1476 	void *spare_int;
1477 	unsigned long flags;
1478 
1479 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))
1480 		return amdgpu_virt_rlcg_vfi_reg_rw(adev, offset, v, flag, xcc_id);
1481 
1482 	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
1483 		dev_err(adev->dev,
1484 			"indirect register access through rlcg is not available\n");
1485 		return 0;
1486 	}
1487 
1488 	if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
1489 		dev_err(adev->dev, "invalid xcc\n");
1490 		return 0;
1491 	}
1492 
1493 	if (amdgpu_device_skip_hw_access(adev))
1494 		return 0;
1495 
1496 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
1497 	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
1498 	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
1499 	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
1500 	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
1501 
1502 	spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
1503 
1504 	if (reg_access_ctrl->spare_int)
1505 		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
1506 
1507 	if (offset == reg_access_ctrl->grbm_cntl) {
1508 		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
1509 		writel(v, scratch_reg2);
1510 		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
1511 			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
1512 	} else if (offset == reg_access_ctrl->grbm_idx) {
1513 		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
1514 		writel(v, scratch_reg3);
1515 		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
1516 			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
1517 	} else {
1518 		/*
1519 		 * SCRATCH_REG0 	= read/write value
1520 		 * SCRATCH_REG1[30:28]	= command
1521 		 * SCRATCH_REG1[19:0]	= address in dword
1522 		 * SCRATCH_REG1[27:24]	= Error reporting
1523 		 */
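		/*
		 * Illustrative example (assuming the flag value carries the
		 * command bits): a GC write of register 0x1234 puts the value
		 * in SCRATCH_REG0 and (0x1234 | flag) in SCRATCH_REG1, then
		 * kicks the RLC through the spare interrupt and polls until
		 * the address bits in SCRATCH_REG1 clear.
		 */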
1524 		writel(v, scratch_reg0);
1525 		writel((offset | flag), scratch_reg1);
1526 		if (reg_access_ctrl->spare_int)
1527 			writel(1, spare_int);
1528 
1529 		for (i = 0; i < timeout; i++) {
1530 			tmp = readl(scratch_reg1);
1531 			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
1532 				break;
1533 			udelay(10);
1534 		}
1535 
1536 		tmp = readl(scratch_reg1);
1537 		if (i >= timeout || (tmp & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) != 0) {
1538 			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
1539 				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
1540 					dev_err(adev->dev,
1541 						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
1542 				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
1543 					dev_err(adev->dev,
1544 						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
1545 				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
1546 					dev_err(adev->dev,
1547 						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
1548 				} else {
1549 					dev_err(adev->dev,
1550 						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
1551 				}
1552 			} else {
1553 				dev_err(adev->dev,
1554 					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
1555 			}
1556 		}
1557 	}
1558 
1559 	ret = readl(scratch_reg0);
1560 
1561 	spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
1562 
1563 	return ret;
1564 }
1565 
1566 void amdgpu_sriov_wreg(struct amdgpu_device *adev,
1567 		       u32 offset, u32 value,
1568 		       u32 acc_flags, u32 hwip, u32 xcc_id)
1569 {
1570 	u32 rlcg_flag;
1571 
1572 	if (amdgpu_device_skip_hw_access(adev))
1573 		return;
1574 
1575 	if (!amdgpu_sriov_runtime(adev) &&
1576 		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
1577 		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
1578 		return;
1579 	}
1580 
1581 	if (acc_flags & AMDGPU_REGS_NO_KIQ)
1582 		WREG32_NO_KIQ(offset, value);
1583 	else
1584 		WREG32(offset, value);
1585 }
1586 
1587 u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
1588 		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
1589 {
1590 	u32 rlcg_flag;
1591 
1592 	if (amdgpu_device_skip_hw_access(adev))
1593 		return 0;
1594 
1595 	if (!amdgpu_sriov_runtime(adev) &&
1596 		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
1597 		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
1598 
1599 	if (acc_flags & AMDGPU_REGS_NO_KIQ)
1600 		return RREG32_NO_KIQ(offset);
1601 	else
1602 		return RREG32(offset);
1603 }
1604 
1605 bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
1606 {
1607 	bool xnack_mode = true;
1608 
1609 	if (amdgpu_sriov_vf(adev) &&
1610 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
1611 		xnack_mode = false;
1612 
1613 	return xnack_mode;
1614 }
1615 
1616 bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev)
1617 {
1618 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1619 
1620 	if (!amdgpu_sriov_ras_caps_en(adev))
1621 		return false;
1622 
1623 	if (adev->virt.ras_en_caps.bits.block_umc)
1624 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC);
1625 	if (adev->virt.ras_en_caps.bits.block_sdma)
1626 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA);
1627 	if (adev->virt.ras_en_caps.bits.block_gfx)
1628 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX);
1629 	if (adev->virt.ras_en_caps.bits.block_mmhub)
1630 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB);
1631 	if (adev->virt.ras_en_caps.bits.block_athub)
1632 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB);
1633 	if (adev->virt.ras_en_caps.bits.block_pcie_bif)
1634 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF);
1635 	if (adev->virt.ras_en_caps.bits.block_hdp)
1636 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP);
1637 	if (adev->virt.ras_en_caps.bits.block_xgmi_wafl)
1638 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL);
1639 	if (adev->virt.ras_en_caps.bits.block_df)
1640 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF);
1641 	if (adev->virt.ras_en_caps.bits.block_smn)
1642 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN);
1643 	if (adev->virt.ras_en_caps.bits.block_sem)
1644 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM);
1645 	if (adev->virt.ras_en_caps.bits.block_mp0)
1646 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0);
1647 	if (adev->virt.ras_en_caps.bits.block_mp1)
1648 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1);
1649 	if (adev->virt.ras_en_caps.bits.block_fuse)
1650 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE);
1651 	if (adev->virt.ras_en_caps.bits.block_mca)
1652 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA);
1653 	if (adev->virt.ras_en_caps.bits.block_vcn)
1654 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN);
1655 	if (adev->virt.ras_en_caps.bits.block_jpeg)
1656 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG);
1657 	if (adev->virt.ras_en_caps.bits.block_ih)
1658 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH);
1659 	if (adev->virt.ras_en_caps.bits.block_mpio)
1660 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO);
1661 
1662 	if (adev->virt.ras_en_caps.bits.poison_propogation_mode)
1663 		con->poison_supported = true; /* Poison is handled by host */
1664 
1665 	if (adev->virt.ras_en_caps.bits.uniras_supported)
1666 		amdgpu_virt_ras_set_remote_uniras(adev, true);
1667 
1668 	return true;
1669 }
1670 
1671 static inline enum amd_sriov_ras_telemetry_gpu_block
amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block)
{
1673 	switch (block) {
1674 	case AMDGPU_RAS_BLOCK__UMC:
1675 		return RAS_TELEMETRY_GPU_BLOCK_UMC;
1676 	case AMDGPU_RAS_BLOCK__SDMA:
1677 		return RAS_TELEMETRY_GPU_BLOCK_SDMA;
1678 	case AMDGPU_RAS_BLOCK__GFX:
1679 		return RAS_TELEMETRY_GPU_BLOCK_GFX;
1680 	case AMDGPU_RAS_BLOCK__MMHUB:
1681 		return RAS_TELEMETRY_GPU_BLOCK_MMHUB;
1682 	case AMDGPU_RAS_BLOCK__ATHUB:
1683 		return RAS_TELEMETRY_GPU_BLOCK_ATHUB;
1684 	case AMDGPU_RAS_BLOCK__PCIE_BIF:
1685 		return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF;
1686 	case AMDGPU_RAS_BLOCK__HDP:
1687 		return RAS_TELEMETRY_GPU_BLOCK_HDP;
1688 	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
1689 		return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL;
1690 	case AMDGPU_RAS_BLOCK__DF:
1691 		return RAS_TELEMETRY_GPU_BLOCK_DF;
1692 	case AMDGPU_RAS_BLOCK__SMN:
1693 		return RAS_TELEMETRY_GPU_BLOCK_SMN;
1694 	case AMDGPU_RAS_BLOCK__SEM:
1695 		return RAS_TELEMETRY_GPU_BLOCK_SEM;
1696 	case AMDGPU_RAS_BLOCK__MP0:
1697 		return RAS_TELEMETRY_GPU_BLOCK_MP0;
1698 	case AMDGPU_RAS_BLOCK__MP1:
1699 		return RAS_TELEMETRY_GPU_BLOCK_MP1;
1700 	case AMDGPU_RAS_BLOCK__FUSE:
1701 		return RAS_TELEMETRY_GPU_BLOCK_FUSE;
1702 	case AMDGPU_RAS_BLOCK__MCA:
1703 		return RAS_TELEMETRY_GPU_BLOCK_MCA;
1704 	case AMDGPU_RAS_BLOCK__VCN:
1705 		return RAS_TELEMETRY_GPU_BLOCK_VCN;
1706 	case AMDGPU_RAS_BLOCK__JPEG:
1707 		return RAS_TELEMETRY_GPU_BLOCK_JPEG;
1708 	case AMDGPU_RAS_BLOCK__IH:
1709 		return RAS_TELEMETRY_GPU_BLOCK_IH;
1710 	case AMDGPU_RAS_BLOCK__MPIO:
1711 		return RAS_TELEMETRY_GPU_BLOCK_MPIO;
1712 	default:
		dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n",
			 block);
1715 		return RAS_TELEMETRY_GPU_BLOCK_COUNT;
1716 	}
1717 }
1718 
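/* Validate the host telemetry checksum and cache the per-block error counts. */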
1719 static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
1720 					       struct amdsriov_ras_telemetry *host_telemetry)
1721 {
1722 	struct amd_sriov_ras_telemetry_error_count *tmp = NULL;
1723 	uint32_t checksum, used_size;
1724 
1725 	checksum = host_telemetry->header.checksum;
1726 	used_size = host_telemetry->header.used_size;
1727 
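	/* Drop payloads larger than the telemetry region; keep the cached counts. */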
1728 	if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
1729 		return 0;
1730 
1731 	tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
1732 	if (!tmp)
1733 		return -ENOMEM;
1734 
1735 	if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
1736 		goto out;
1737 
1738 	memcpy(&adev->virt.count_cache, tmp,
1739 	       min(used_size, sizeof(adev->virt.count_cache)));
1740 out:
1741 	kfree(tmp);
1742 
1743 	return 0;
1744 }
1745 
1746 static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update)
1747 {
1748 	struct amdgpu_virt *virt = &adev->virt;
1749 
1750 	if (!virt->ops || !virt->ops->req_ras_err_count)
1751 		return -EOPNOTSUPP;
1752 
	/* The host allows 15 RAS telemetry requests per 60 seconds, after which
	 * it will ignore incoming guest messages. Rate-limit the guest messages
	 * to prevent the guest from DoS-ing itself.
	 */
1757 	if (__ratelimit(&virt->ras.ras_error_cnt_rs) || force_update) {
1758 		mutex_lock(&virt->ras.ras_telemetry_mutex);
1759 		if (!virt->ops->req_ras_err_count(adev))
1760 			amdgpu_virt_cache_host_error_counts(adev,
1761 				virt->fw_reserve.ras_telemetry);
1762 		mutex_unlock(&virt->ras.ras_telemetry_mutex);
1763 	}
1764 
1765 	return 0;
1766 }
1767 
/**
 * amdgpu_virt_req_ras_err_count() - query RAS error counts from the host
 * @adev:	amdgpu device.
 * @block:	RAS block to query.
 * @err_data:	filled with the cached ue/ce/de counts for @block.
 *
 * Bypass the ACA interface and query ECC counts directly from the host.
 * If host access is lost (e.g. during reset), the last cached counts
 * are returned instead.
 *
 * Return: Zero on success, -EOPNOTSUPP if the block is unsupported.
 */
1769 int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
1770 				  struct ras_err_data *err_data)
1771 {
1772 	enum amd_sriov_ras_telemetry_gpu_block sriov_block;
1773 
1774 	sriov_block = amdgpu_ras_block_to_sriov(adev, block);
1775 
1776 	if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
1777 	    !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
1778 		return -EOPNOTSUPP;
1779 
	/* Host access may be lost during reset; just return the last cached data. */
1781 	if (down_read_trylock(&adev->reset_domain->sem)) {
1782 		amdgpu_virt_req_ras_err_count_internal(adev, false);
1783 		up_read(&adev->reset_domain->sem);
1784 	}
1785 
1786 	err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count;
1787 	err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count;
1788 	err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count;
1789 
1790 	return 0;
1791 }
1792 
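/* Validate a host CPER dump and write each CPER record to the CPER ring buffer. */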
1793 static int
1794 amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
1795 				struct amdsriov_ras_telemetry *host_telemetry,
1796 				u32 *more)
1797 {
1798 	struct amd_sriov_ras_cper_dump *cper_dump = NULL;
1799 	struct cper_hdr *entry = NULL;
1800 	struct amdgpu_ring *ring = &adev->cper.ring_buf;
1801 	uint32_t checksum, used_size, i;
1802 	int ret = 0;
1803 
1804 	checksum = host_telemetry->header.checksum;
1805 	used_size = host_telemetry->header.used_size;
1806 
1807 	if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
1808 		return -EINVAL;
1809 
1810 	cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
1811 	if (!cper_dump)
1812 		return -ENOMEM;
1813 
1814 	if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
1815 		ret = -EINVAL;
1816 		goto out;
1817 	}
1818 
1819 	*more = cper_dump->more;
1820 
1821 	if (cper_dump->wptr < adev->virt.ras.cper_rptr) {
1822 		dev_warn(
1823 			adev->dev,
			"guest specified rptr that was too high! guest rptr: 0x%llx, host wptr: 0x%llx\n",
1825 			adev->virt.ras.cper_rptr, cper_dump->wptr);
1826 
1827 		adev->virt.ras.cper_rptr = cper_dump->wptr;
1828 		goto out;
1829 	}
1830 
1831 	entry = (struct cper_hdr *)&cper_dump->buf[0];
1832 
1833 	for (i = 0; i < cper_dump->count; i++) {
1834 		amdgpu_cper_ring_write(ring, entry, entry->record_length);
1835 		entry = (struct cper_hdr *)((char *)entry +
1836 					    entry->record_length);
1837 	}
1838 
1839 	if (cper_dump->overflow_count)
1840 		dev_warn(adev->dev,
1841 			 "host reported CPER overflow of 0x%llx entries!\n",
1842 			 cper_dump->overflow_count);
1843 
1844 	adev->virt.ras.cper_rptr = cper_dump->wptr;
1845 out:
1846 	kfree(cper_dump);
1847 
1848 	return ret;
1849 }
1850 
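/* Keep requesting CPER dumps from the host until it reports no more pending data. */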
1851 static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
1852 {
1853 	struct amdgpu_virt *virt = &adev->virt;
1854 	int ret = 0;
1855 	uint32_t more = 0;
1856 
1857 	if (!virt->ops || !virt->ops->req_ras_cper_dump)
1858 		return -EOPNOTSUPP;
1859 
1860 	do {
1861 		if (!virt->ops->req_ras_cper_dump(adev, virt->ras.cper_rptr))
1862 			ret = amdgpu_virt_write_cpers_to_ring(
1863 				adev, virt->fw_reserve.ras_telemetry, &more);
1864 		else
1865 			ret = 0;
1866 	} while (more && !ret);
1867 
1868 	return ret;
1869 }
1870 
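/**
 * amdgpu_virt_req_ras_cper_dump() - fetch pending CPER records from the host
 * @adev:	amdgpu device.
 * @force_update:	bypass the rate limit on telemetry requests.
 *
 * Return: Zero on success, -EOPNOTSUPP if CPER telemetry is not enabled,
 * otherwise an error from the dump processing.
 */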
1871 int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
1872 {
1873 	struct amdgpu_virt *virt = &adev->virt;
1874 	int ret = 0;
1875 
1876 	if (!amdgpu_sriov_ras_cper_en(adev))
1877 		return -EOPNOTSUPP;
1878 
1879 	if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
1880 	    down_read_trylock(&adev->reset_domain->sem)) {
1881 		mutex_lock(&virt->ras.ras_telemetry_mutex);
1882 		ret = amdgpu_virt_req_ras_cper_dump_internal(adev);
1883 		mutex_unlock(&virt->ras.ras_telemetry_mutex);
1884 		up_read(&adev->reset_domain->sem);
1885 	}
1886 
1887 	return ret;
1888 }
1889 
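/**
 * amdgpu_virt_ras_telemetry_post_reset() - refresh RAS telemetry after reset
 * @adev:	amdgpu device.
 *
 * Force a fresh error-count fetch from the host and re-run the RAS error
 * count query so the cached totals are warm after a reset.
 *
 * Return: Zero.
 */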
1890 int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
1891 {
1892 	unsigned long ue_count, ce_count;
1893 
1894 	if (amdgpu_sriov_ras_telemetry_en(adev)) {
1895 		amdgpu_virt_req_ras_err_count_internal(adev, true);
1896 		amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL);
1897 	}
1898 
1899 	return 0;
1900 }
1901 
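/**
 * amdgpu_virt_ras_telemetry_block_en() - check telemetry support for a block
 * @adev:	amdgpu device.
 * @block:	RAS block to check.
 *
 * Return: true if the host exposes RAS telemetry for @block, false otherwise.
 */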
1902 bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
1903 					enum amdgpu_ras_block block)
1904 {
1905 	enum amd_sriov_ras_telemetry_gpu_block sriov_block;
1906 
1907 	sriov_block = amdgpu_ras_block_to_sriov(adev, block);
1908 
1909 	if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
1910 	    !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
1911 		return false;
1912 
1913 	return true;
1914 }
1915 
/**
 * amdgpu_virt_request_bad_pages() - request bad pages
 * @adev:	amdgpu device.
 *
 * Send a command to the GPU hypervisor to write new bad pages into the
 * shared PF2VF region.
 */
1921 void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
1922 {
1923 	struct amdgpu_virt *virt = &adev->virt;
1924 
1925 	if (virt->ops && virt->ops->req_bad_pages)
1926 		virt->ops->req_bad_pages(adev);
1927 }
1928 
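/* Validate host telemetry and report whether the queried address hit a critical region. */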
1929 static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
1930 					   struct amdsriov_ras_telemetry *host_telemetry,
1931 					   bool *hit)
1932 {
1933 	struct amd_sriov_ras_chk_criti *tmp = NULL;
1934 	uint32_t checksum, used_size;
1935 
1936 	checksum = host_telemetry->header.checksum;
1937 	used_size = host_telemetry->header.used_size;
1938 
1939 	if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
1940 		return 0;
1941 
1942 	tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
1943 	if (!tmp)
1944 		return -ENOMEM;
1945 
1946 	if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
1947 		goto out;
1948 
1949 	if (hit)
		*hit = !!tmp->hit;
1951 
1952 out:
1953 	kfree(tmp);
1954 
1955 	return 0;
1956 }
1957 
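/**
 * amdgpu_virt_check_vf_critical_region() - check an address against host critical regions
 * @adev:	amdgpu device.
 * @addr:	address to check.
 * @hit:	set to true if the address falls within a critical region.
 *
 * Return: Zero on success, -EOPNOTSUPP if the host interface does not
 * support the check, -EPERM if the request was rate-limited.
 */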
1958 int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
1959 {
1960 	struct amdgpu_virt *virt = &adev->virt;
1961 	int r = -EPERM;
1962 
1963 	if (!virt->ops || !virt->ops->req_ras_chk_criti)
1964 		return -EOPNOTSUPP;
1965 
	/* The host allows 15 RAS telemetry requests per 60 seconds, after which
	 * it will ignore incoming guest messages. Rate-limit the guest messages
	 * to prevent the guest from DoS-ing itself.
	 */
1970 	if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
1971 		mutex_lock(&virt->ras.ras_telemetry_mutex);
1972 		if (!virt->ops->req_ras_chk_criti(adev, addr))
1973 			r = amdgpu_virt_cache_chk_criti_hit(
1974 				adev, virt->fw_reserve.ras_telemetry, hit);
1975 		mutex_unlock(&virt->ras.ras_telemetry_mutex);
1976 	}
1977 
1978 	return r;
1979 }
1980 
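/* Forward a remote RAS command to the host through the virt ops, if present. */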
1981 static int req_remote_ras_cmd(struct amdgpu_device *adev,
1982 			u32 param1, u32 param2, u32 param3)
1983 {
1984 	struct amdgpu_virt *virt = &adev->virt;
1985 
1986 	if (virt->ops && virt->ops->req_remote_ras_cmd)
1987 		return virt->ops->req_remote_ras_cmd(adev, param1, param2, param3);
1988 	return -ENOENT;
1989 }
1990 
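/**
 * amdgpu_virt_send_remote_ras_cmd() - send a RAS command buffer to the host
 * @adev:	amdgpu device.
 * @buf:	guest physical address of the command buffer.
 * @buf_len:	length of the command buffer in bytes.
 *
 * The buffer address is split into lower/upper 32-bit halves before being
 * handed to the host. Fails with -EIO if the reset domain cannot be taken.
 *
 * Return: Zero on success, negative error code on failure.
 */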
1991 int amdgpu_virt_send_remote_ras_cmd(struct amdgpu_device *adev,
1992 		uint64_t buf, uint32_t buf_len)
1993 {
1994 	uint64_t gpa = buf;
1995 	int ret = -EIO;
1996 
1997 	if (down_read_trylock(&adev->reset_domain->sem)) {
1998 		ret = req_remote_ras_cmd(adev,
1999 			lower_32_bits(gpa), upper_32_bits(gpa), buf_len);
2000 		up_read(&adev->reset_domain->sem);
2001 	}
2002 
2003 	return ret;
2004 }
2005