xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c (revision face6a3615a649456eb4549f6d474221d877d604)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/module.h>
25 
26 #ifdef CONFIG_X86
27 #include <asm/hypervisor.h>
28 #endif
29 
30 #include <drm/drm_drv.h>
31 #include <xen/xen.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_reset.h"
36 #include "amdgpu_dpm.h"
37 #include "vi.h"
38 #include "soc15.h"
39 #include "nv.h"
40 
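/* Record one firmware id/version pair in the vf2pf ucode table, indexed by ucode id. */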
41 #define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
42 	do { \
43 		vf2pf_info->ucode_info[ucode].id = ucode; \
44 		vf2pf_info->ucode_info[ucode].version = ver; \
45 	} while (0)
46 
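/*
 * Register offset of RCC_CONFIG_MEMSIZE, treated as the VRAM size in MB by
 * amdgpu_virt_init_critical_region() to sanity-check the init data header
 * location.
 */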
47 #define mmRCC_CONFIG_MEMSIZE    0xde3
48 
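/* Human-readable names for the dynamic critical region tables, indexed by table id; used in log messages. */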
49 const char *amdgpu_virt_dynamic_crit_table_name[] = {
50 	"IP DISCOVERY",
51 	"VBIOS IMG",
52 	"RAS TELEMETRY",
53 	"DATA EXCHANGE",
54 	"BAD PAGE INFO",
55 	"INIT HEADER",
56 	"LAST",
57 };
58 
59 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
60 {
61 	/* By now all MMIO pages except the mailbox are blocked if blocking
62 	 * is enabled in the hypervisor.  Choose SCRATCH_REG0 to test.
63 	 */
64 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
65 }
66 
67 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
68 {
69 	struct drm_device *ddev = adev_to_drm(adev);
70 
71 	/* enable virtual display */
72 	if (adev->asic_type != CHIP_ALDEBARAN &&
73 	    adev->asic_type != CHIP_ARCTURUS &&
74 	    ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
75 		if (adev->mode_info.num_crtc == 0)
76 			adev->mode_info.num_crtc = 1;
77 		adev->enable_virtual_display = true;
78 	}
79 	ddev->driver_features &= ~DRIVER_ATOMIC;
80 	adev->cg_flags = 0;
81 	adev->pg_flags = 0;
82 
83 	/* Reduce kcq number to 2 to reduce latency */
84 	if (amdgpu_num_kcq == -1)
85 		amdgpu_num_kcq = 2;
86 }
87 
88 /**
89  * amdgpu_virt_request_full_gpu() - request full gpu access
90  * @adev:	amdgpu device.
91  * @init:	is driver init time.
92  * When starting driver init/fini, full GPU access must be requested first.
93  * Return: Zero on success, error code otherwise.
94  */
95 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
96 {
97 	struct amdgpu_virt *virt = &adev->virt;
98 	int r;
99 
100 	if (virt->ops && virt->ops->req_full_gpu) {
101 		r = virt->ops->req_full_gpu(adev, init);
102 		if (r) {
103 			adev->no_hw_access = true;
104 			return r;
105 		}
106 
107 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
108 	}
109 
110 	return 0;
111 }
112 
113 /**
114  * amdgpu_virt_release_full_gpu() - release full gpu access
115  * @adev:	amdgpu device.
116  * @init:	is driver init time.
117  * When finishing driver init/fini, full GPU access must be released.
118  * Return: Zero on success, error code otherwise.
119  */
120 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
121 {
122 	struct amdgpu_virt *virt = &adev->virt;
123 	int r;
124 
125 	if (virt->ops && virt->ops->rel_full_gpu) {
126 		r = virt->ops->rel_full_gpu(adev, init);
127 		if (r)
128 			return r;
129 
130 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
131 	}
132 	return 0;
133 }
134 
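/*
 * Illustrative (non-normative) pairing of the request/release helpers above
 * during driver init, assuming an SR-IOV VF:
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	...program the hardware...
 *	amdgpu_virt_release_full_gpu(adev, true);
 */
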
135 /**
136  * amdgpu_virt_reset_gpu() - reset gpu
137  * @adev:	amdgpu device.
138  * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
139  * Return: Zero on success, error code otherwise.
140  */
141 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
142 {
143 	struct amdgpu_virt *virt = &adev->virt;
144 	int r;
145 
146 	if (virt->ops && virt->ops->reset_gpu) {
147 		r = virt->ops->reset_gpu(adev);
148 		if (r)
149 			return r;
150 
151 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
152 	}
153 
154 	return 0;
155 }
156 
157 void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
158 {
159 	struct amdgpu_virt *virt = &adev->virt;
160 
161 	if (virt->ops && virt->ops->req_init_data)
162 		virt->ops->req_init_data(adev);
163 
164 	if (adev->virt.req_init_data_ver > 0)
165 		dev_info(adev->dev, "host supports REQ_INIT_DATA handshake of critical_region_version %d\n",
166 				 adev->virt.req_init_data_ver);
167 	else
168 		dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n");
169 }
170 
171 /**
172  * amdgpu_virt_ready_to_reset() - send ready to reset to host
173  * @adev:	amdgpu device.
174  * Send a ready-to-reset message to the GPU hypervisor to signal that we have
175  * stopped GPU activity and are ready for a host FLR.
176  */
177 void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev)
178 {
179 	struct amdgpu_virt *virt = &adev->virt;
180 
181 	if (virt->ops && virt->ops->ready_to_reset)
182 		virt->ops->ready_to_reset(adev);
183 }
184 
185 /**
186  * amdgpu_virt_wait_reset() - wait for reset gpu completed
187  * @adev:	amdgpu device.
188  * Wait for the GPU reset to complete.
189  * Return: Zero on success, error code otherwise.
190  */
191 int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
192 {
193 	struct amdgpu_virt *virt = &adev->virt;
194 
195 	if (!virt->ops || !virt->ops->wait_reset)
196 		return -EINVAL;
197 
198 	return virt->ops->wait_reset(adev);
199 }
200 
201 /**
202  * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
203  * @adev:	amdgpu device.
204  * The MM table is used by UVD and VCE for their initialization
205  * Return: Zero on success.
206  */
207 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
208 {
209 	int r;
210 
211 	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
212 		return 0;
213 
214 	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
215 				    AMDGPU_GEM_DOMAIN_VRAM |
216 				    AMDGPU_GEM_DOMAIN_GTT,
217 				    &adev->virt.mm_table.bo,
218 				    &adev->virt.mm_table.gpu_addr,
219 				    (void *)&adev->virt.mm_table.cpu_addr);
220 	if (r) {
221 		dev_err(adev->dev, "failed to alloc mm table, error = %d\n", r);
222 		return r;
223 	}
224 
225 	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
226 	dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n",
227 		 adev->virt.mm_table.gpu_addr,
228 		 adev->virt.mm_table.cpu_addr);
229 	return 0;
230 }
231 
232 /**
233  * amdgpu_virt_free_mm_table() - free mm table memory
234  * @adev:	amdgpu device.
235  * Free MM table memory
236  */
237 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
238 {
239 	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
240 		return;
241 
242 	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
243 			      &adev->virt.mm_table.gpu_addr,
244 			      (void *)&adev->virt.mm_table.cpu_addr);
245 	adev->virt.mm_table.gpu_addr = 0;
246 }
247 
248 /**
249  * amdgpu_virt_rcvd_ras_interrupt() - receive ras interrupt
250  * @adev:	amdgpu device.
251  * Check whether the host has sent a RAS error message
252  * Return: true if found, otherwise false
253  */
254 bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev)
255 {
256 	struct amdgpu_virt *virt = &adev->virt;
257 
258 	if (!virt->ops || !virt->ops->rcvd_ras_intr)
259 		return false;
260 
261 	return virt->ops->rcvd_ras_intr(adev);
262 }
263 
264 
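/*
 * Compute the byte-wise sum of @obj_size bytes at @obj, seeded with @key,
 * then subtract the bytes of @checksum so that the stored checksum field
 * itself does not contribute to the result.
 */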
265 unsigned int amd_sriov_msg_checksum(void *obj,
266 				unsigned long obj_size,
267 				unsigned int key,
268 				unsigned int checksum)
269 {
270 	unsigned int ret = key;
271 	unsigned long i = 0;
272 	unsigned char *pos;
273 
274 	pos = (unsigned char *)obj;
275 	/* calculate checksum */
276 	for (i = 0; i < obj_size; ++i)
277 		ret += *(pos + i);
278 	/* minus the checksum itself */
279 	pos = (unsigned char *)&checksum;
280 	for (i = 0; i < sizeof(checksum); ++i)
281 		ret -= *(pos + i);
282 	return ret;
283 }
284 
285 static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
286 {
287 	struct amdgpu_virt *virt = &adev->virt;
288 	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
289 	/* GPU will be marked bad on the host if the bp count is more than 10,
290 	 * so allocating 512 entries is enough.
291 	 */
292 	unsigned int align_space = 512;
293 	void *bps = NULL;
294 	struct amdgpu_bo **bps_bo = NULL;
295 
296 	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
297 	if (!*data)
298 		goto data_failure;
299 
300 	bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
301 	if (!bps)
302 		goto bps_failure;
303 
304 	bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
305 	if (!bps_bo)
306 		goto bps_bo_failure;
307 
308 	(*data)->bps = bps;
309 	(*data)->bps_bo = bps_bo;
310 	(*data)->count = 0;
311 	(*data)->last_reserved = 0;
312 
313 	virt->ras_init_done = true;
314 
315 	return 0;
316 
317 bps_bo_failure:
318 	kfree(bps);
319 bps_failure:
320 	kfree(*data);
321 data_failure:
322 	return -ENOMEM;
323 }
324 
325 static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
326 {
327 	struct amdgpu_virt *virt = &adev->virt;
328 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
329 	struct amdgpu_bo *bo;
330 	int i;
331 
332 	if (!data)
333 		return;
334 
335 	for (i = data->last_reserved - 1; i >= 0; i--) {
336 		bo = data->bps_bo[i];
337 		if (bo) {
338 			amdgpu_bo_free_kernel(&bo, NULL, NULL);
339 			data->bps_bo[i] = bo;
340 		}
341 		data->last_reserved = i;
342 	}
343 }
344 
345 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
346 {
347 	struct amdgpu_virt *virt = &adev->virt;
348 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
349 
350 	virt->ras_init_done = false;
351 
352 	if (!data)
353 		return;
354 
355 	amdgpu_virt_ras_release_bp(adev);
356 
357 	kfree(data->bps);
358 	kfree(data->bps_bo);
359 	kfree(data);
360 	virt->virt_eh_data = NULL;
361 }
362 
363 static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
364 		struct eeprom_table_record *bps, int pages)
365 {
366 	struct amdgpu_virt *virt = &adev->virt;
367 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
368 
369 	if (!data)
370 		return;
371 
372 	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
373 	data->count += pages;
374 }
375 
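/*
 * Reserve VRAM covering every retired page recorded since the last call so the
 * bad pages can never be handed out again: either through a vram_mgr range
 * reservation when the manager is live, or by pinning a kernel BO at the exact
 * page offset otherwise.
 */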
376 static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
377 {
378 	struct amdgpu_virt *virt = &adev->virt;
379 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
380 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
381 	struct ttm_resource_manager *man = &mgr->manager;
382 	struct amdgpu_bo *bo = NULL;
383 	uint64_t bp;
384 	int i;
385 
386 	if (!data)
387 		return;
388 
389 	for (i = data->last_reserved; i < data->count; i++) {
390 		bp = data->bps[i].retired_page;
391 
392 		/* There are two cases where a reserve error should be ignored:
393 		 * 1) a ras bad page has been allocated (used by someone);
394 		 * 2) a ras bad page has been reserved (duplicate error injection
395 		 *    for one page);
396 		 */
397 		if  (ttm_resource_manager_used(man)) {
398 			amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
399 				bp << AMDGPU_GPU_PAGE_SHIFT,
400 				AMDGPU_GPU_PAGE_SIZE);
401 			data->bps_bo[i] = NULL;
402 		} else {
403 			if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
404 							AMDGPU_GPU_PAGE_SIZE,
405 							&bo, NULL))
406 				dev_dbg(adev->dev,
407 						"RAS WARN: reserve vram for retired page %llx fail\n",
408 						bp);
409 			data->bps_bo[i] = bo;
410 		}
411 		data->last_reserved = i + 1;
412 		bo = NULL;
413 	}
414 }
415 
416 static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
417 		uint64_t retired_page)
418 {
419 	struct amdgpu_virt *virt = &adev->virt;
420 	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
421 	int i;
422 
423 	if (!data)
424 		return true;
425 
426 	for (i = 0; i < data->count; i++)
427 		if (retired_page == data->bps[i].retired_page)
428 			return true;
429 
430 	return false;
431 }
432 
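/*
 * Walk the host-provided bad page block (an array of 64-bit retired page
 * numbers inside the fw/drv VRAM usage region) and record/reserve every page
 * that has not been seen before.
 */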
433 static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
434 		uint64_t bp_block_offset, uint32_t bp_block_size)
435 {
436 	struct eeprom_table_record bp;
437 	uint64_t retired_page;
438 	uint32_t bp_idx, bp_cnt;
439 	void *vram_usage_va = NULL;
440 
441 	if (adev->mman.fw_vram_usage_va)
442 		vram_usage_va = adev->mman.fw_vram_usage_va;
443 	else
444 		vram_usage_va = adev->mman.drv_vram_usage_va;
445 
446 	memset(&bp, 0, sizeof(bp));
447 
448 	if (bp_block_size) {
449 		bp_cnt = bp_block_size / sizeof(uint64_t);
450 		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
451 			retired_page = *(uint64_t *)(vram_usage_va +
452 					bp_block_offset + bp_idx * sizeof(uint64_t));
453 			bp.retired_page = retired_page;
454 
455 			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
456 				continue;
457 
458 			amdgpu_virt_ras_add_bps(adev, &bp, 1);
459 
460 			amdgpu_virt_ras_reserve_bps(adev);
461 		}
462 	}
463 }
464 
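/*
 * Validate the pf2vf region (size and checksum, for both the v1 and v2
 * layouts) and cache the host-provided settings: GIM features, register access
 * flags, multimedia bandwidth limits, unique id and RAS capabilities. An
 * out-of-range vf2pf update interval (below 200 ms or above 10000 ms) is reset
 * to 2000 ms.
 */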
465 static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
466 {
467 	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
468 	uint32_t checksum;
469 	uint32_t checkval;
470 
471 	uint32_t i;
472 	uint32_t tmp;
473 
474 	if (adev->virt.fw_reserve.p_pf2vf == NULL)
475 		return -EINVAL;
476 
477 	if (pf2vf_info->size > 1024) {
478 		dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size);
479 		return -EINVAL;
480 	}
481 
482 	switch (pf2vf_info->version) {
483 	case 1:
484 		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
485 		checkval = amd_sriov_msg_checksum(
486 			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
487 			adev->virt.fw_reserve.checksum_key, checksum);
488 		if (checksum != checkval) {
489 			dev_err(adev->dev,
490 				"invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
491 				checksum, checkval);
492 			return -EINVAL;
493 		}
494 
495 		adev->virt.gim_feature =
496 			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
497 		break;
498 	case 2:
499 		/* TODO: missing key, need to add it later */
500 		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
501 		checkval = amd_sriov_msg_checksum(
502 			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
503 			0, checksum);
504 		if (checksum != checkval) {
505 			dev_err(adev->dev,
506 				"invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
507 				checksum, checkval);
508 			return -EINVAL;
509 		}
510 
511 		adev->virt.vf2pf_update_interval_ms =
512 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
513 		adev->virt.gim_feature =
514 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
515 		adev->virt.reg_access =
516 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
517 
518 		adev->virt.decode_max_dimension_pixels = 0;
519 		adev->virt.decode_max_frame_pixels = 0;
520 		adev->virt.encode_max_dimension_pixels = 0;
521 		adev->virt.encode_max_frame_pixels = 0;
522 		adev->virt.is_mm_bw_enabled = false;
523 		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
524 			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
525 			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);
526 
527 			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
528 			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);
529 
530 			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
531 			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);
532 
533 			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
534 			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
535 		}
536 		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
537 			adev->virt.is_mm_bw_enabled = true;
538 
539 		adev->unique_id =
540 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
541 		adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all;
542 		adev->virt.ras_telemetry_en_caps.all =
543 			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all;
544 		break;
545 	default:
546 		dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
547 		return -EINVAL;
548 	}
549 
550 	/* correct too large or too small interval values */
551 	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
552 		adev->virt.vf2pf_update_interval_ms = 2000;
553 
554 	return 0;
555 }
556 
557 static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
558 {
559 	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
560 	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
561 
562 	if (adev->virt.fw_reserve.p_vf2pf == NULL)
563 		return;
564 
565 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
566 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
567 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
568 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
569 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
570 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
571 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
572 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
573 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
574 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
575 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
576 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
577 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
578 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
579 			    adev->psp.asd_context.bin_desc.fw_version);
580 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
581 			    adev->psp.ras_context.context.bin_desc.fw_version);
582 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
583 			    adev->psp.xgmi_context.context.bin_desc.fw_version);
584 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
585 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
586 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
587 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
588 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
589 }
590 
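/*
 * Fill the vf2pf region for the host: driver version, framebuffer usage
 * statistics, loaded firmware versions and, when MES info is enabled, the MES
 * log location, finishing with a checksum over the whole structure.
 */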
591 static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
592 {
593 	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
594 
595 	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
596 
597 	if (adev->virt.fw_reserve.p_vf2pf == NULL)
598 		return -EINVAL;
599 
600 	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
601 
602 	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
603 	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
604 
605 #ifdef MODULE
606 	if (THIS_MODULE->version != NULL)
607 		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
608 	else
609 #endif
610 		strcpy(vf2pf_info->driver_version, "N/A");
611 
612 	vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
613 	vf2pf_info->driver_cert = 0;
614 	vf2pf_info->os_info.all = 0;
615 
616 	vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
617 		 ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;
618 	vf2pf_info->fb_vis_usage =
619 		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
620 	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
621 	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
622 
623 	amdgpu_virt_populate_vf2pf_ucode_info(adev);
624 
625 	/* TODO: read dynamic info */
626 	vf2pf_info->gfx_usage = 0;
627 	vf2pf_info->compute_usage = 0;
628 	vf2pf_info->encode_usage = 0;
629 	vf2pf_info->decode_usage = 0;
630 
631 	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
632 	if (amdgpu_sriov_is_mes_info_enable(adev)) {
633 		vf2pf_info->mes_info_addr =
634 			(uint64_t)(adev->mes.resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE);
635 		vf2pf_info->mes_info_size =
636 			adev->mes.resource_1[0]->tbo.base.size - AMDGPU_GPU_PAGE_SIZE;
637 	}
638 	vf2pf_info->checksum =
639 		amd_sriov_msg_checksum(
640 		vf2pf_info, sizeof(*vf2pf_info), 0, 0);
641 
642 	return 0;
643 }
644 
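/*
 * Periodic worker: re-read the pf2vf data and republish the vf2pf data every
 * vf2pf_update_interval_ms. If reads keep failing (or a host RAS interrupt is
 * pending) while in SR-IOV runtime mode, mark a fatal error and schedule a
 * reset instead.
 */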
645 static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
646 {
647 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
648 	int ret;
649 
650 	ret = amdgpu_virt_read_pf2vf_data(adev);
651 	if (ret) {
652 		adev->virt.vf2pf_update_retry_cnt++;
653 
654 		if ((amdgpu_virt_rcvd_ras_interrupt(adev) ||
655 			adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
656 			amdgpu_sriov_runtime(adev)) {
657 
658 			amdgpu_ras_set_fed(adev, true);
659 			if (amdgpu_reset_domain_schedule(adev->reset_domain,
660 							&adev->kfd.reset_work))
661 				return;
662 			else
663 				dev_err(adev->dev, "Failed to queue work! at %s", __func__);
664 		}
665 
666 		goto out;
667 	}
668 
669 	adev->virt.vf2pf_update_retry_cnt = 0;
670 	amdgpu_virt_write_vf2pf_data(adev);
671 
672 out:
673 	schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
674 }
675 
676 static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data)
677 {
678 	uint32_t dataexchange_offset =
679 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
680 	uint32_t dataexchange_size =
681 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;
682 	uint64_t pos = 0;
683 
684 	dev_info(adev->dev,
685 			"Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
686 			dataexchange_offset, dataexchange_size);
687 
688 	if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) {
689 		dev_err(adev->dev, "Data exchange data not aligned to 4 bytes\n");
690 		return -EINVAL;
691 	}
692 
693 	pos = (uint64_t)dataexchange_offset;
694 	amdgpu_device_vram_access(adev, pos, pfvf_data,
695 			dataexchange_size, false);
696 
697 	return 0;
698 }
699 
700 void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
701 {
702 	if (adev->virt.vf2pf_update_interval_ms != 0) {
703 		dev_info(adev->dev, "clean up the vf2pf work item\n");
704 		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
705 		adev->virt.vf2pf_update_interval_ms = 0;
706 	}
707 }
708 
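/*
 * Set up the pf2vf/vf2pf exchange. When exactly one of the fw/drv VRAM usage
 * regions is mapped, perform a full exchange and start the periodic vf2pf
 * worker; otherwise (the early, bios-only path) only the pf2vf data is read,
 * either from the dynamic critical region (v2) or from the fixed offset inside
 * the VBIOS image (v1).
 */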
709 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
710 {
711 	uint32_t *pfvf_data = NULL;
712 
713 	adev->virt.fw_reserve.p_pf2vf = NULL;
714 	adev->virt.fw_reserve.p_vf2pf = NULL;
715 	adev->virt.vf2pf_update_interval_ms = 0;
716 	adev->virt.vf2pf_update_retry_cnt = 0;
717 
718 	if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
719 		dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
720 	} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
721 		/* go through this logic in ip_init and reset to init the workqueue */
722 		amdgpu_virt_exchange_data(adev);
723 
724 		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
725 		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
726 	} else if (adev->bios != NULL) {
727 		/* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
728 		if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
729 			pfvf_data =
730 				kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10,
731 					GFP_KERNEL);
732 			if (!pfvf_data) {
733 				dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n");
734 				return;
735 			}
736 
737 			if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data))
738 				goto free_pfvf_data;
739 
740 			adev->virt.fw_reserve.p_pf2vf =
741 				(struct amd_sriov_msg_pf2vf_info_header *)pfvf_data;
742 
743 			amdgpu_virt_read_pf2vf_data(adev);
744 
745 free_pfvf_data:
746 			kfree(pfvf_data);
747 			pfvf_data = NULL;
748 			adev->virt.fw_reserve.p_pf2vf = NULL;
749 		} else {
750 			adev->virt.fw_reserve.p_pf2vf =
751 				(struct amd_sriov_msg_pf2vf_info_header *)
752 				(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
753 
754 			amdgpu_virt_read_pf2vf_data(adev);
755 		}
756 	}
757 }
758 
759 
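/*
 * Locate the pf2vf, vf2pf and RAS telemetry areas, either through the dynamic
 * critical region table (v2) or at the fixed v1 KB offsets, then perform an
 * initial pf2vf read / vf2pf write and pick up any bad page block advertised
 * by a v2 header.
 */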
760 void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
761 {
762 	uint64_t bp_block_offset = 0;
763 	uint32_t bp_block_size = 0;
764 	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
765 
766 	if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
767 		if (adev->mman.fw_vram_usage_va) {
768 			if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
769 				adev->virt.fw_reserve.p_pf2vf =
770 					(struct amd_sriov_msg_pf2vf_info_header *)
771 					(adev->mman.fw_vram_usage_va +
772 					adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
773 				adev->virt.fw_reserve.p_vf2pf =
774 					(struct amd_sriov_msg_vf2pf_info_header *)
775 					(adev->mman.fw_vram_usage_va +
776 					adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
777 					(AMD_SRIOV_MSG_SIZE_KB << 10));
778 				adev->virt.fw_reserve.ras_telemetry =
779 					(adev->mman.fw_vram_usage_va +
780 					adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
781 			} else {
782 				adev->virt.fw_reserve.p_pf2vf =
783 					(struct amd_sriov_msg_pf2vf_info_header *)
784 					(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
785 				adev->virt.fw_reserve.p_vf2pf =
786 					(struct amd_sriov_msg_vf2pf_info_header *)
787 					(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
788 				adev->virt.fw_reserve.ras_telemetry =
789 					(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
790 			}
791 		} else if (adev->mman.drv_vram_usage_va) {
792 			adev->virt.fw_reserve.p_pf2vf =
793 				(struct amd_sriov_msg_pf2vf_info_header *)
794 				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
795 			adev->virt.fw_reserve.p_vf2pf =
796 				(struct amd_sriov_msg_vf2pf_info_header *)
797 				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
798 			adev->virt.fw_reserve.ras_telemetry =
799 				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
800 		}
801 
802 		amdgpu_virt_read_pf2vf_data(adev);
803 		amdgpu_virt_write_vf2pf_data(adev);
804 
805 		/* bad page handling for version 2 */
806 		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
807 			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
808 
809 			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
810 				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
811 			bp_block_size = pf2vf_v2->bp_block_size;
812 
813 			if (bp_block_size && !adev->virt.ras_init_done)
814 				amdgpu_virt_init_ras_err_handler_data(adev);
815 
816 			if (adev->virt.ras_init_done)
817 				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
818 		}
819 	}
820 }
821 
822 static u32 amdgpu_virt_init_detect_asic(struct amdgpu_device *adev)
823 {
824 	uint32_t reg;
825 
826 	switch (adev->asic_type) {
827 	case CHIP_TONGA:
828 	case CHIP_FIJI:
829 		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
830 		break;
831 	case CHIP_VEGA10:
832 	case CHIP_VEGA20:
833 	case CHIP_NAVI10:
834 	case CHIP_NAVI12:
835 	case CHIP_SIENNA_CICHLID:
836 	case CHIP_ARCTURUS:
837 	case CHIP_ALDEBARAN:
838 	case CHIP_IP_DISCOVERY:
839 		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
840 		break;
841 	default: /* other chips don't support SRIOV */
842 		reg = 0;
843 		break;
844 	}
845 
846 	if (reg & 1)
847 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
848 
849 	if (reg & 0x80000000)
850 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
851 
852 	if (!reg) {
853 		/* passthrough mode excludes sriov mode */
854 		if (is_virtual_machine() && !xen_initial_domain())
855 			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
856 	}
857 
858 	return reg;
859 }
860 
861 static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
862 {
863 	bool is_sriov = false;
864 
865 	/* we have the ability to check now */
866 	if (amdgpu_sriov_vf(adev)) {
867 		is_sriov = true;
868 
869 		switch (adev->asic_type) {
870 		case CHIP_TONGA:
871 		case CHIP_FIJI:
872 			vi_set_virt_ops(adev);
873 			break;
874 		case CHIP_VEGA10:
875 			soc15_set_virt_ops(adev);
876 #ifdef CONFIG_X86
877 			/* do not send GPU_INIT_DATA with MS_HYPERV */
878 			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
879 #endif
880 				/* send a dummy GPU_INIT_DATA request to host on vega10 */
881 				amdgpu_virt_request_init_data(adev);
882 			break;
883 		case CHIP_VEGA20:
884 		case CHIP_ARCTURUS:
885 		case CHIP_ALDEBARAN:
886 			soc15_set_virt_ops(adev);
887 			break;
888 		case CHIP_NAVI10:
889 		case CHIP_NAVI12:
890 		case CHIP_SIENNA_CICHLID:
891 		case CHIP_IP_DISCOVERY:
892 			nv_set_virt_ops(adev);
893 			/* try send GPU_INIT_DATA request to host */
894 			amdgpu_virt_request_init_data(adev);
895 			break;
896 		default: /* other chips don't support SRIOV */
897 			is_sriov = false;
898 			dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type);
899 			break;
900 		}
901 	}
902 
903 	return is_sriov;
904 }
905 
906 static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
907 {
908 	ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
909 	ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
910 	ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
911 
912 	ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
913 			    RATELIMIT_MSG_ON_RELEASE);
914 	ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
915 			    RATELIMIT_MSG_ON_RELEASE);
916 	ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
917 			    RATELIMIT_MSG_ON_RELEASE);
918 
919 	mutex_init(&adev->virt.ras.ras_telemetry_mutex);
920 
921 	adev->virt.ras.cper_rptr = 0;
922 }
923 
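/*
 * Byte-sum complement over [buf_start, buf_end): the result is the low eight
 * bits of 0xffffffff minus the sum, which is compared against the checksum
 * stored in the init data header.
 */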
924 static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end)
925 {
926 	uint32_t sum = 0;
927 
928 	if (buf_start >= buf_end)
929 		return 0;
930 
931 	for (; buf_start < buf_end; buf_start++)
932 		sum += buf_start[0];
933 
934 	return 0xffffffff - sum;
935 }
936 
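/*
 * For critical region v2: read the init data header from VRAM at the offset
 * provided by the host, validate its signature, bounds and checksum, populate
 * crit_regn_tbl with the per-table offsets/sizes, and reserve the whole region
 * through fw_vram_usage.
 */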
937 int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
938 {
939 	struct amd_sriov_msg_init_data_header *init_data_hdr = NULL;
940 	u64 init_hdr_offset = adev->virt.init_data_header.offset;
941 	u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */
942 	u64 vram_size;
943 	u64 end;
944 	int r = 0;
945 	uint8_t checksum = 0;
946 
947 	/* Skip below init if critical region version != v2 */
948 	if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2)
949 		return 0;
950 
951 	if (init_hdr_offset < 0) {
952 		dev_err(adev->dev, "Invalid init header offset\n");
953 		return -EINVAL;
954 	}
955 
956 	vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
957 	if (!vram_size || vram_size == U32_MAX)
958 		return -EINVAL;
959 	vram_size <<= 20;
960 
961 	if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) {
962 		dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n");
963 		return -EINVAL;
964 	}
965 
966 	/* Allocate for init_data_hdr */
967 	init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL);
968 	if (!init_data_hdr)
969 		return -ENOMEM;
970 
971 	amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr,
972 					sizeof(struct amd_sriov_msg_init_data_header), false);
973 
974 	/* Table validation */
975 	if (strncmp(init_data_hdr->signature,
976 				AMDGPU_SRIOV_CRIT_DATA_SIGNATURE,
977 				AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) {
978 		dev_err(adev->dev, "Invalid init data signature: %.4s\n",
979 			init_data_hdr->signature);
980 		r = -EINVAL;
981 		goto out;
982 	}
983 
984 	checksum = amdgpu_virt_crit_region_calc_checksum(
985 			(uint8_t *)&init_data_hdr->initdata_offset,
986 			(uint8_t *)init_data_hdr +
987 			sizeof(struct amd_sriov_msg_init_data_header));
988 	if (checksum != init_data_hdr->checksum) {
989 		dev_err(adev->dev, "Checksum mismatch: calculated 0x%x, init_data 0x%x\n",
990 				checksum, init_data_hdr->checksum);
991 		r = -EINVAL;
992 		goto out;
993 	}
994 
995 	memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn));
996 	memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl));
997 
998 	adev->virt.crit_regn.offset = init_data_hdr->initdata_offset;
999 	adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb;
1000 
1001 	/* Validation and initialization for each table entry */
1002 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) {
1003 		if (!init_data_hdr->ip_discovery_size_in_kb ||
1004 				init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) {
1005 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1006 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID],
1007 				init_data_hdr->ip_discovery_size_in_kb);
1008 			r = -EINVAL;
1009 			goto out;
1010 		}
1011 
1012 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset =
1013 			init_data_hdr->ip_discovery_offset;
1014 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb =
1015 			init_data_hdr->ip_discovery_size_in_kb;
1016 	}
1017 
1018 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) {
1019 		if (!init_data_hdr->vbios_img_size_in_kb) {
1020 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1021 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID],
1022 				init_data_hdr->vbios_img_size_in_kb);
1023 			r = -EINVAL;
1024 			goto out;
1025 		}
1026 
1027 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset =
1028 			init_data_hdr->vbios_img_offset;
1029 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb =
1030 			init_data_hdr->vbios_img_size_in_kb;
1031 	}
1032 
1033 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) {
1034 		if (!init_data_hdr->ras_tele_info_size_in_kb) {
1035 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1036 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID],
1037 				init_data_hdr->ras_tele_info_size_in_kb);
1038 			r = -EINVAL;
1039 			goto out;
1040 		}
1041 
1042 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset =
1043 			init_data_hdr->ras_tele_info_offset;
1044 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb =
1045 			init_data_hdr->ras_tele_info_size_in_kb;
1046 	}
1047 
1048 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) {
1049 		if (!init_data_hdr->dataexchange_size_in_kb) {
1050 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1051 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID],
1052 				init_data_hdr->dataexchange_size_in_kb);
1053 			r = -EINVAL;
1054 			goto out;
1055 		}
1056 
1057 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset =
1058 			init_data_hdr->dataexchange_offset;
1059 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb =
1060 			init_data_hdr->dataexchange_size_in_kb;
1061 	}
1062 
1063 	if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) {
1064 		if (!init_data_hdr->bad_page_size_in_kb) {
1065 			dev_err(adev->dev, "Invalid %s size: 0x%x\n",
1066 				amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID],
1067 				init_data_hdr->bad_page_size_in_kb);
1068 			r = -EINVAL;
1069 			goto out;
1070 		}
1071 
1072 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset =
1073 			init_data_hdr->bad_page_info_offset;
1074 		adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb =
1075 			init_data_hdr->bad_page_size_in_kb;
1076 	}
1077 
1078 	/* Validation for critical region info */
1079 	if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) {
1080 		dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n",
1081 				adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb);
1082 		r = -EINVAL;
1083 		goto out;
1084 	}
1085 
1086 	/* reserved memory starts at the crit region base offset with a size of 5MB */
1087 	adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset;
1088 	adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10;
1089 	dev_info(adev->dev,
1090 		"critical region v%d requested to reserve memory start at %08llx with %llu KB.\n",
1091 			init_data_hdr->version,
1092 			adev->mman.fw_vram_usage_start_offset,
1093 			adev->mman.fw_vram_usage_size >> 10);
1094 
1095 	adev->virt.is_dynamic_crit_regn_enabled = true;
1096 
1097 out:
1098 	kfree(init_data_hdr);
1099 	init_data_hdr = NULL;
1100 
1101 	return r;
1102 }
1103 
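/*
 * Copy one dynamic critical region table out of VRAM into a caller buffer.
 * @size carries the buffer capacity on entry and the copied size on return;
 * the copy is rejected if the buffer is smaller than the table.
 */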
1104 int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
1105 	int data_id, uint8_t *binary, u32 *size)
1106 {
1107 	uint32_t data_offset = 0;
1108 	uint32_t data_size = 0;
1109 	enum amd_sriov_msg_table_id_enum data_table_id = data_id;
1110 
1111 	if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
1112 		return -EINVAL;
1113 
1114 	data_offset = adev->virt.crit_regn_tbl[data_table_id].offset;
1115 	data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10;
1116 
1117 	/* Validate on input params */
1118 	if (!binary || !size || *size < (uint64_t)data_size)
1119 		return -EINVAL;
1120 
1121 	/* Proceed to copy the dynamic content */
1122 	amdgpu_device_vram_access(adev,
1123 			(uint64_t)data_offset, (uint32_t *)binary, data_size, false);
1124 	*size = (uint64_t)data_size;
1125 
1126 	dev_dbg(adev->dev,
1127 		"Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
1128 		amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size);
1129 
1130 	return 0;
1131 }
1132 
1133 void amdgpu_virt_init(struct amdgpu_device *adev)
1134 {
1135 	bool is_sriov = false;
1136 	uint32_t reg = amdgpu_virt_init_detect_asic(adev);
1137 
1138 	is_sriov = amdgpu_virt_init_req_data(adev, reg);
1139 
1140 	if (is_sriov)
1141 		amdgpu_virt_init_ras(adev);
1142 }
1143 
1144 static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
1145 {
1146 	return amdgpu_sriov_is_debug(adev);
1147 }
1148 
1149 static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
1150 {
1151 	return amdgpu_sriov_is_normal(adev);
1152 }
1153 
1154 int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
1155 {
1156 	if (!amdgpu_sriov_vf(adev) ||
1157 	    amdgpu_virt_access_debugfs_is_kiq(adev))
1158 		return 0;
1159 
1160 	if (amdgpu_virt_access_debugfs_is_mmio(adev))
1161 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
1162 	else
1163 		return -EPERM;
1164 
1165 	return 0;
1166 }
1167 
1168 void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
1169 {
1170 	if (amdgpu_sriov_vf(adev))
1171 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
1172 }
1173 
1174 enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
1175 {
1176 	enum amdgpu_sriov_vf_mode mode;
1177 
1178 	if (amdgpu_sriov_vf(adev)) {
1179 		if (amdgpu_sriov_is_pp_one_vf(adev))
1180 			mode = SRIOV_VF_MODE_ONE_VF;
1181 		else
1182 			mode = SRIOV_VF_MODE_MULTI_VF;
1183 	} else {
1184 		mode = SRIOV_VF_MODE_BARE_METAL;
1185 	}
1186 
1187 	return mode;
1188 }
1189 
1190 void amdgpu_virt_pre_reset(struct amdgpu_device *adev)
1191 {
1192 	/* stop the data exchange thread */
1193 	amdgpu_virt_fini_data_exchange(adev);
1194 	amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR);
1195 }
1196 
1197 void amdgpu_virt_post_reset(struct amdgpu_device *adev)
1198 {
1199 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
1200 		/* force set to GFXOFF state after reset,
1201 		 * to avoid some invalid operation before GC enable
1202 		 */
1203 		adev->gfx.is_poweron = false;
1204 	}
1205 
1206 	adev->mes.ring[0].sched.ready = false;
1207 }
1208 
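/*
 * Decide whether the VF should skip loading @ucode_id under SR-IOV; which
 * firmwares are skipped is keyed on the MP0 IP version via the white/black
 * lists below.
 */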
1209 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
1210 {
1211 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1212 	case IP_VERSION(13, 0, 0):
1213 		/* no vf autoload, white list */
1214 		if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
1215 		    ucode_id == AMDGPU_UCODE_ID_VCN)
1216 			return false;
1217 		else
1218 			return true;
1219 	case IP_VERSION(11, 0, 9):
1220 	case IP_VERSION(11, 0, 7):
1221 		/* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
1222 		if (ucode_id == AMDGPU_UCODE_ID_RLC_G
1223 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1224 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1225 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1226 		    || ucode_id == AMDGPU_UCODE_ID_SMC)
1227 			return true;
1228 		else
1229 			return false;
1230 	case IP_VERSION(13, 0, 10):
1231 		/* white list */
1232 		if (ucode_id == AMDGPU_UCODE_ID_CAP
1233 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
1234 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
1235 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
1236 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
1237 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
1238 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
1239 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
1240 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
1241 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
1242 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
1243 		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
1244 		|| ucode_id == AMDGPU_UCODE_ID_CP_MES
1245 		|| ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
1246 		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1
1247 		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
1248 		|| ucode_id == AMDGPU_UCODE_ID_VCN1
1249 		|| ucode_id == AMDGPU_UCODE_ID_VCN)
1250 			return false;
1251 		else
1252 			return true;
1253 	default:
1254 		/* legacy black list */
1255 		if (ucode_id == AMDGPU_UCODE_ID_SDMA0
1256 		    || ucode_id == AMDGPU_UCODE_ID_SDMA1
1257 		    || ucode_id == AMDGPU_UCODE_ID_SDMA2
1258 		    || ucode_id == AMDGPU_UCODE_ID_SDMA3
1259 		    || ucode_id == AMDGPU_UCODE_ID_SDMA4
1260 		    || ucode_id == AMDGPU_UCODE_ID_SDMA5
1261 		    || ucode_id == AMDGPU_UCODE_ID_SDMA6
1262 		    || ucode_id == AMDGPU_UCODE_ID_SDMA7
1263 		    || ucode_id == AMDGPU_UCODE_ID_RLC_G
1264 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1265 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1266 		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1267 		    || ucode_id == AMDGPU_UCODE_ID_SMC)
1268 			return true;
1269 		else
1270 			return false;
1271 	}
1272 }
1273 
1274 void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
1275 			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
1276 			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
1277 {
1278 	uint32_t i;
1279 
1280 	if (!adev->virt.is_mm_bw_enabled)
1281 		return;
1282 
1283 	if (encode) {
1284 		for (i = 0; i < encode_array_size; i++) {
1285 			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
1286 			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
1287 			if (encode[i].max_width > 0)
1288 				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
1289 			else
1290 				encode[i].max_height = 0;
1291 		}
1292 	}
1293 
1294 	if (decode) {
1295 		for (i = 0; i < decode_array_size; i++) {
1296 			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
1297 			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
1298 			if (decode[i].max_width > 0)
1299 				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
1300 			else
1301 				decode[i].max_height = 0;
1302 		}
1303 	}
1304 }
1305 
1306 bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
1307 						 u32 acc_flags, u32 hwip,
1308 						 bool write, u32 *rlcg_flag)
1309 {
1310 	bool ret = false;
1311 
1312 	switch (hwip) {
1313 	case GC_HWIP:
1314 		if (amdgpu_sriov_reg_indirect_gc(adev)) {
1315 			*rlcg_flag =
1316 				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
1317 			ret = true;
1318 		/* only in new version, AMDGPU_REGS_NO_KIQ and
1319 		 * AMDGPU_REGS_RLC are enabled simultaneously */
1320 		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
1321 				!(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
1322 			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
1323 			ret = true;
1324 		}
1325 		break;
1326 	case MMHUB_HWIP:
1327 		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
1328 		    (acc_flags & AMDGPU_REGS_RLC) && write) {
1329 			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
1330 			ret = true;
1331 		}
1332 		break;
1333 	default:
1334 		break;
1335 	}
1336 	return ret;
1337 }
1338 
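/*
 * Perform an indirect register access through the RLCG scratch register
 * protocol: the value goes to SCRATCH_REG0, the dword offset ORed with the
 * command flag to SCRATCH_REG1 (optionally ringing the spare interrupt), then
 * SCRATCH_REG1 is polled until the address bits clear or the access times out.
 * grbm_cntl and grbm_idx writes are redirected to scratch_reg2/scratch_reg3,
 * and the result is returned from SCRATCH_REG0.
 */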
1339 u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
1340 {
1341 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1342 	uint32_t timeout = 50000;
1343 	uint32_t i, tmp;
1344 	uint32_t ret = 0;
1345 	void *scratch_reg0;
1346 	void *scratch_reg1;
1347 	void *scratch_reg2;
1348 	void *scratch_reg3;
1349 	void *spare_int;
1350 	unsigned long flags;
1351 
1352 	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
1353 		dev_err(adev->dev,
1354 			"indirect register access through rlcg is not available\n");
1355 		return 0;
1356 	}
1357 
1358 	if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
1359 		dev_err(adev->dev, "invalid xcc\n");
1360 		return 0;
1361 	}
1362 
1363 	if (amdgpu_device_skip_hw_access(adev))
1364 		return 0;
1365 
1366 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
1367 	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
1368 	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
1369 	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
1370 	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
1371 
1372 	spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
1373 
1374 	if (reg_access_ctrl->spare_int)
1375 		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
1376 
1377 	if (offset == reg_access_ctrl->grbm_cntl) {
1378 		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
1379 		writel(v, scratch_reg2);
1380 		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
1381 			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
1382 	} else if (offset == reg_access_ctrl->grbm_idx) {
1383 		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
1384 		writel(v, scratch_reg3);
1385 		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
1386 			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
1387 	} else {
1388 		/*
1389 		 * SCRATCH_REG0 	= read/write value
1390 		 * SCRATCH_REG1[30:28]	= command
1391 		 * SCRATCH_REG1[19:0]	= address in dword
1392 		 * SCRATCH_REG1[27:24]	= Error reporting
1393 		 */
1394 		writel(v, scratch_reg0);
1395 		writel((offset | flag), scratch_reg1);
1396 		if (reg_access_ctrl->spare_int)
1397 			writel(1, spare_int);
1398 
1399 		for (i = 0; i < timeout; i++) {
1400 			tmp = readl(scratch_reg1);
1401 			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
1402 				break;
1403 			udelay(10);
1404 		}
1405 
1406 		tmp = readl(scratch_reg1);
1407 		if (i >= timeout || (tmp & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) != 0) {
1408 			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
1409 				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
1410 					dev_err(adev->dev,
1411 						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
1412 				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
1413 					dev_err(adev->dev,
1414 						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
1415 				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
1416 					dev_err(adev->dev,
1417 						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
1418 				} else {
1419 					dev_err(adev->dev,
1420 						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
1421 				}
1422 			} else {
1423 				dev_err(adev->dev,
1424 					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
1425 			}
1426 		}
1427 	}
1428 
1429 	ret = readl(scratch_reg0);
1430 
1431 	spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
1432 
1433 	return ret;
1434 }
1435 
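/*
 * SR-IOV aware register write: route the access through RLCG when the VF is
 * not in runtime mode and the hwip/flags require the indirect path, otherwise
 * fall back to WREG32/WREG32_NO_KIQ. The read helper below mirrors the same
 * logic.
 */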
1436 void amdgpu_sriov_wreg(struct amdgpu_device *adev,
1437 		       u32 offset, u32 value,
1438 		       u32 acc_flags, u32 hwip, u32 xcc_id)
1439 {
1440 	u32 rlcg_flag;
1441 
1442 	if (amdgpu_device_skip_hw_access(adev))
1443 		return;
1444 
1445 	if (!amdgpu_sriov_runtime(adev) &&
1446 		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
1447 		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
1448 		return;
1449 	}
1450 
1451 	if (acc_flags & AMDGPU_REGS_NO_KIQ)
1452 		WREG32_NO_KIQ(offset, value);
1453 	else
1454 		WREG32(offset, value);
1455 }
1456 
1457 u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
1458 		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
1459 {
1460 	u32 rlcg_flag;
1461 
1462 	if (amdgpu_device_skip_hw_access(adev))
1463 		return 0;
1464 
1465 	if (!amdgpu_sriov_runtime(adev) &&
1466 		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
1467 		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
1468 
1469 	if (acc_flags & AMDGPU_REGS_NO_KIQ)
1470 		return RREG32_NO_KIQ(offset);
1471 	else
1472 		return RREG32(offset);
1473 }
1474 
1475 bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
1476 {
1477 	bool xnack_mode = true;
1478 
1479 	if (amdgpu_sriov_vf(adev) &&
1480 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
1481 		xnack_mode = false;
1482 
1483 	return xnack_mode;
1484 }
1485 
1486 bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev)
1487 {
1488 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1489 
1490 	if (!amdgpu_sriov_ras_caps_en(adev))
1491 		return false;
1492 
1493 	if (adev->virt.ras_en_caps.bits.block_umc)
1494 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC);
1495 	if (adev->virt.ras_en_caps.bits.block_sdma)
1496 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA);
1497 	if (adev->virt.ras_en_caps.bits.block_gfx)
1498 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX);
1499 	if (adev->virt.ras_en_caps.bits.block_mmhub)
1500 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB);
1501 	if (adev->virt.ras_en_caps.bits.block_athub)
1502 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB);
1503 	if (adev->virt.ras_en_caps.bits.block_pcie_bif)
1504 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF);
1505 	if (adev->virt.ras_en_caps.bits.block_hdp)
1506 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP);
1507 	if (adev->virt.ras_en_caps.bits.block_xgmi_wafl)
1508 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL);
1509 	if (adev->virt.ras_en_caps.bits.block_df)
1510 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF);
1511 	if (adev->virt.ras_en_caps.bits.block_smn)
1512 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN);
1513 	if (adev->virt.ras_en_caps.bits.block_sem)
1514 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM);
1515 	if (adev->virt.ras_en_caps.bits.block_mp0)
1516 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0);
1517 	if (adev->virt.ras_en_caps.bits.block_mp1)
1518 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1);
1519 	if (adev->virt.ras_en_caps.bits.block_fuse)
1520 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE);
1521 	if (adev->virt.ras_en_caps.bits.block_mca)
1522 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA);
1523 	if (adev->virt.ras_en_caps.bits.block_vcn)
1524 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN);
1525 	if (adev->virt.ras_en_caps.bits.block_jpeg)
1526 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG);
1527 	if (adev->virt.ras_en_caps.bits.block_ih)
1528 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH);
1529 	if (adev->virt.ras_en_caps.bits.block_mpio)
1530 		adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO);
1531 
1532 	if (adev->virt.ras_en_caps.bits.poison_propogation_mode)
1533 		con->poison_supported = true; /* Poison is handled by host */
1534 
1535 	return true;
1536 }
1537 
1538 static inline enum amd_sriov_ras_telemetry_gpu_block
1539 amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block) {
1540 	switch (block) {
1541 	case AMDGPU_RAS_BLOCK__UMC:
1542 		return RAS_TELEMETRY_GPU_BLOCK_UMC;
1543 	case AMDGPU_RAS_BLOCK__SDMA:
1544 		return RAS_TELEMETRY_GPU_BLOCK_SDMA;
1545 	case AMDGPU_RAS_BLOCK__GFX:
1546 		return RAS_TELEMETRY_GPU_BLOCK_GFX;
1547 	case AMDGPU_RAS_BLOCK__MMHUB:
1548 		return RAS_TELEMETRY_GPU_BLOCK_MMHUB;
1549 	case AMDGPU_RAS_BLOCK__ATHUB:
1550 		return RAS_TELEMETRY_GPU_BLOCK_ATHUB;
1551 	case AMDGPU_RAS_BLOCK__PCIE_BIF:
1552 		return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF;
1553 	case AMDGPU_RAS_BLOCK__HDP:
1554 		return RAS_TELEMETRY_GPU_BLOCK_HDP;
1555 	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
1556 		return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL;
1557 	case AMDGPU_RAS_BLOCK__DF:
1558 		return RAS_TELEMETRY_GPU_BLOCK_DF;
1559 	case AMDGPU_RAS_BLOCK__SMN:
1560 		return RAS_TELEMETRY_GPU_BLOCK_SMN;
1561 	case AMDGPU_RAS_BLOCK__SEM:
1562 		return RAS_TELEMETRY_GPU_BLOCK_SEM;
1563 	case AMDGPU_RAS_BLOCK__MP0:
1564 		return RAS_TELEMETRY_GPU_BLOCK_MP0;
1565 	case AMDGPU_RAS_BLOCK__MP1:
1566 		return RAS_TELEMETRY_GPU_BLOCK_MP1;
1567 	case AMDGPU_RAS_BLOCK__FUSE:
1568 		return RAS_TELEMETRY_GPU_BLOCK_FUSE;
1569 	case AMDGPU_RAS_BLOCK__MCA:
1570 		return RAS_TELEMETRY_GPU_BLOCK_MCA;
1571 	case AMDGPU_RAS_BLOCK__VCN:
1572 		return RAS_TELEMETRY_GPU_BLOCK_VCN;
1573 	case AMDGPU_RAS_BLOCK__JPEG:
1574 		return RAS_TELEMETRY_GPU_BLOCK_JPEG;
1575 	case AMDGPU_RAS_BLOCK__IH:
1576 		return RAS_TELEMETRY_GPU_BLOCK_IH;
1577 	case AMDGPU_RAS_BLOCK__MPIO:
1578 		return RAS_TELEMETRY_GPU_BLOCK_MPIO;
1579 	default:
1580 		dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n",
1581 			      block);
1582 		return RAS_TELEMETRY_GPU_BLOCK_COUNT;
1583 	}
1584 }
1585 
1586 static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
1587 					       struct amdsriov_ras_telemetry *host_telemetry)
1588 {
1589 	struct amd_sriov_ras_telemetry_error_count *tmp = NULL;
1590 	uint32_t checksum, used_size;
1591 
1592 	checksum = host_telemetry->header.checksum;
1593 	used_size = host_telemetry->header.used_size;
1594 
1595 	if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
1596 		return 0;
1597 
1598 	tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
1599 	if (!tmp)
1600 		return -ENOMEM;
1601 
1602 	if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
1603 		goto out;
1604 
1605 	memcpy(&adev->virt.count_cache, tmp,
1606 	       min(used_size, sizeof(adev->virt.count_cache)));
1607 out:
1608 	kfree(tmp);
1609 
1610 	return 0;
1611 }
1612 
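/*
 * amdgpu_virt_req_ras_err_count_internal() - request RAS error counts from the host
 * @adev: amdgpu device.
 * @force_update: bypass the rate limit and always send the request.
 * Ask the host for the latest RAS error counts and refresh
 * adev->virt.count_cache from the shared telemetry buffer.
 * Return: Zero on success or when rate limited, -EOPNOTSUPP if the host
 * interface is unavailable.
 */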
1613 static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update)
1614 {
1615 	struct amdgpu_virt *virt = &adev->virt;
1616 
1617 	if (!virt->ops || !virt->ops->req_ras_err_count)
1618 		return -EOPNOTSUPP;
1619 
1620 	/* The host allows 15 RAS telemetry requests per 60 seconds. After that, the
1621 	 * host will ignore incoming guest messages. Rate limit the guest messages
1622 	 * to prevent the guest from DoSing itself.
1623 	 */
1624 	if (__ratelimit(&virt->ras.ras_error_cnt_rs) || force_update) {
1625 		mutex_lock(&virt->ras.ras_telemetry_mutex);
1626 		if (!virt->ops->req_ras_err_count(adev))
1627 			amdgpu_virt_cache_host_error_counts(adev,
1628 				virt->fw_reserve.ras_telemetry);
1629 		mutex_unlock(&virt->ras.ras_telemetry_mutex);
1630 	}
1631 
1632 	return 0;
1633 }
1634 
1635 /* Bypass ACA interface and query ECC counts directly from host */
1636 int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
1637 				  struct ras_err_data *err_data)
1638 {
1639 	enum amd_sriov_ras_telemetry_gpu_block sriov_block;
1640 
1641 	sriov_block = amdgpu_ras_block_to_sriov(adev, block);
1642 
1643 	if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
1644 	    !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
1645 		return -EOPNOTSUPP;
1646 
1647 	/* Host access may be lost during reset; just return the last cached data. */
1648 	if (down_read_trylock(&adev->reset_domain->sem)) {
1649 		amdgpu_virt_req_ras_err_count_internal(adev, false);
1650 		up_read(&adev->reset_domain->sem);
1651 	}
1652 
1653 	err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count;
1654 	err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count;
1655 	err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count;
1656 
1657 	return 0;
1658 }
1659 
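/*
 * amdgpu_virt_write_cpers_to_ring() - copy host CPER records into the CPER ring
 * @adev: amdgpu device.
 * @host_telemetry: RAS telemetry block shared by the host.
 * @more: set to non-zero if the host has more CPER records pending.
 * Validate the CPER dump, write each record into the local CPER ring buffer
 * and advance the guest read pointer to the host write pointer.
 * Return: Zero on success, -EINVAL on a malformed dump, -ENOMEM on
 * allocation failure.
 */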
1660 static int
1661 amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
1662 				struct amdsriov_ras_telemetry *host_telemetry,
1663 				u32 *more)
1664 {
1665 	struct amd_sriov_ras_cper_dump *cper_dump = NULL;
1666 	struct cper_hdr *entry = NULL;
1667 	struct amdgpu_ring *ring = &adev->cper.ring_buf;
1668 	uint32_t checksum, used_size, i;
1669 	int ret = 0;
1670 
1671 	checksum = host_telemetry->header.checksum;
1672 	used_size = host_telemetry->header.used_size;
1673 
1674 	if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
1675 		return -EINVAL;
1676 
1677 	cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
1678 	if (!cper_dump)
1679 		return -ENOMEM;
1680 
1681 	if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
1682 		ret = -EINVAL;
1683 		goto out;
1684 	}
1685 
1686 	*more = cper_dump->more;
1687 
1688 	if (cper_dump->wptr < adev->virt.ras.cper_rptr) {
1689 		dev_warn(
1690 			adev->dev,
1691 			"guest specified rptr that was too high! guest rptr: 0x%llx, host wptr: 0x%llx\n",
1692 			adev->virt.ras.cper_rptr, cper_dump->wptr);
1693 
1694 		adev->virt.ras.cper_rptr = cper_dump->wptr;
1695 		goto out;
1696 	}
1697 
1698 	entry = (struct cper_hdr *)&cper_dump->buf[0];
1699 
1700 	for (i = 0; i < cper_dump->count; i++) {
1701 		amdgpu_cper_ring_write(ring, entry, entry->record_length);
1702 		entry = (struct cper_hdr *)((char *)entry +
1703 					    entry->record_length);
1704 	}
1705 
1706 	if (cper_dump->overflow_count)
1707 		dev_warn(adev->dev,
1708 			 "host reported CPER overflow of 0x%llx entries!\n",
1709 			 cper_dump->overflow_count);
1710 
1711 	adev->virt.ras.cper_rptr = cper_dump->wptr;
1712 out:
1713 	kfree(cper_dump);
1714 
1715 	return ret;
1716 }
1717 
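/*
 * amdgpu_virt_req_ras_cper_dump_internal() - drain pending CPER records from the host
 * @adev: amdgpu device.
 * Repeatedly request CPER dumps starting at the current guest read pointer
 * and copy the returned records into the CPER ring until no more records
 * are pending.
 * Return: Zero on success, -EOPNOTSUPP if the host interface is unavailable,
 * otherwise an error from writing the records to the ring.
 */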
1718 static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
1719 {
1720 	struct amdgpu_virt *virt = &adev->virt;
1721 	int ret = 0;
1722 	uint32_t more = 0;
1723 
1724 	if (!virt->ops || !virt->ops->req_ras_cper_dump)
1725 		return -EOPNOTSUPP;
1726 
1727 	do {
1728 		if (!virt->ops->req_ras_cper_dump(adev, virt->ras.cper_rptr))
1729 			ret = amdgpu_virt_write_cpers_to_ring(
1730 				adev, virt->fw_reserve.ras_telemetry, &more);
1731 		else
1732 			ret = 0;
1733 	} while (more && !ret);
1734 
1735 	return ret;
1736 }
1737 
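/**
 * amdgpu_virt_req_ras_cper_dump() - fetch CPER records from the host
 * @adev: amdgpu device.
 * @force_update: bypass the rate limit and always send the request.
 * Pull pending CPER records from the host into the local CPER ring buffer.
 * Requests are rate limited unless @force_update is set, and are skipped
 * while a reset holds the reset domain semaphore.
 * Return: Zero if the request succeeded or was skipped, -EOPNOTSUPP if CPER
 * telemetry is not enabled, otherwise an error code.
 */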
1738 int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
1739 {
1740 	struct amdgpu_virt *virt = &adev->virt;
1741 	int ret = 0;
1742 
1743 	if (!amdgpu_sriov_ras_cper_en(adev))
1744 		return -EOPNOTSUPP;
1745 
1746 	if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
1747 	    down_read_trylock(&adev->reset_domain->sem)) {
1748 		mutex_lock(&virt->ras.ras_telemetry_mutex);
1749 		ret = amdgpu_virt_req_ras_cper_dump_internal(adev);
1750 		mutex_unlock(&virt->ras.ras_telemetry_mutex);
1751 		up_read(&adev->reset_domain->sem);
1752 	}
1753 
1754 	return ret;
1755 }
1756 
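/**
 * amdgpu_virt_ras_telemetry_post_reset() - refresh RAS telemetry after reset
 * @adev: amdgpu device.
 * Force an update of the cached host error counts and query the RAS error
 * counts again so the post-reset state is picked up.
 * Return: Always zero.
 */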
1757 int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
1758 {
1759 	unsigned long ue_count, ce_count;
1760 
1761 	if (amdgpu_sriov_ras_telemetry_en(adev)) {
1762 		amdgpu_virt_req_ras_err_count_internal(adev, true);
1763 		amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL);
1764 	}
1765 
1766 	return 0;
1767 }
1768 
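/**
 * amdgpu_virt_ras_telemetry_block_en() - check if RAS telemetry covers a block
 * @adev: amdgpu device.
 * @block: amdgpu RAS block to check.
 * Return: True if @block maps to an SR-IOV telemetry block that the host
 * reports as enabled, false otherwise.
 */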
1769 bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
1770 					enum amdgpu_ras_block block)
1771 {
1772 	enum amd_sriov_ras_telemetry_gpu_block sriov_block;
1773 
1774 	sriov_block = amdgpu_ras_block_to_sriov(adev, block);
1775 
1776 	if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
1777 	    !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
1778 		return false;
1779 
1780 	return true;
1781 }
1782 
1783 /**
1784  * amdgpu_virt_request_bad_pages() - request bad pages
1785  * @adev: amdgpu device.
1786  * Send a command to the GPU hypervisor to write new bad pages into the shared PF2VF region.
1787  */
1788 void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
1789 {
1790 	struct amdgpu_virt *virt = &adev->virt;
1791 
1792 	if (virt->ops && virt->ops->req_bad_pages)
1793 		virt->ops->req_bad_pages(adev);
1794 }
1795 
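/*
 * amdgpu_virt_cache_chk_criti_hit() - parse the host's critical region reply
 * @adev: amdgpu device.
 * @host_telemetry: RAS telemetry block shared by the host.
 * @hit: set to true if the checked address falls in a critical region.
 * Validate the telemetry header and checksum before reporting the result;
 * @hit is left unchanged if the data is rejected.
 * Return: Zero on success or if the data is rejected, -ENOMEM on allocation failure.
 */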
1796 static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
1797 					   struct amdsriov_ras_telemetry *host_telemetry,
1798 					   bool *hit)
1799 {
1800 	struct amd_sriov_ras_chk_criti *tmp = NULL;
1801 	uint32_t checksum, used_size;
1802 
1803 	checksum = host_telemetry->header.checksum;
1804 	used_size = host_telemetry->header.used_size;
1805 
1806 	if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
1807 		return 0;
1808 
1809 	tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
1810 	if (!tmp)
1811 		return -ENOMEM;
1812 
1813 	if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
1814 		goto out;
1815 
1816 	if (hit)
1817 		*hit = tmp->hit ? true : false;
1818 
1819 out:
1820 	kfree(tmp);
1821 
1822 	return 0;
1823 }
1824 
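/**
 * amdgpu_virt_check_vf_critical_region() - ask the host whether an address is critical
 * @adev: amdgpu device.
 * @addr: GPU address to check.
 * @hit: set to true if @addr falls within a host critical region.
 * Return: Zero on success, -EOPNOTSUPP if the host interface is unavailable,
 * -ENOMEM on allocation failure, -EPERM if the request was rate limited or
 * rejected by the host.
 */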
1825 int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
1826 {
1827 	struct amdgpu_virt *virt = &adev->virt;
1828 	int r = -EPERM;
1829 
1830 	if (!virt->ops || !virt->ops->req_ras_chk_criti)
1831 		return -EOPNOTSUPP;
1832 
1833 	/* The host allows 15 RAS telemetry requests per 60 seconds. After that, the
1834 	 * host will ignore incoming guest messages. Rate limit the guest messages
1835 	 * to prevent the guest from DoSing itself.
1836 	 */
1837 	if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
1838 		mutex_lock(&virt->ras.ras_telemetry_mutex);
1839 		if (!virt->ops->req_ras_chk_criti(adev, addr))
1840 			r = amdgpu_virt_cache_chk_criti_hit(
1841 				adev, virt->fw_reserve.ras_telemetry, hit);
1842 		mutex_unlock(&virt->ras.ras_telemetry_mutex);
1843 	}
1844 
1845 	return r;
1846 }
1847