/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor. Choose SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS &&
	    ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	/* Reduce kcq number to 2 to reduce latency */
	if (amdgpu_num_kcq == -1)
		amdgpu_num_kcq = 2;
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When starting to init/fini the driver, full gpu access must be
 * requested first.
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
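/*
 * Illustrative call pattern (a sketch only; the real callers live in the
 * device init/fini and reset paths): exclusive access brackets the
 * critical section with a request/release pair:
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... init work that needs full gpu access ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 */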
/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When finishing driver init/fini, the previously acquired full gpu
 * access needs to be released.
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the
 * VM is using.
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: 0 on success, negative error code on failure.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

unsigned int amd_sriov_msg_checksum(void *obj,
				    unsigned long obj_size,
				    unsigned int key,
				    unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}
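/*
 * Worked example for the checksum helper above (a sketch; values are made
 * up): for a 4-byte object { 0x01, 0x02, 0x03, 0x04 } and key 0x10, the
 * running sum is 0x10 + 0x01 + 0x02 + 0x03 + 0x04 = 0x1a. If the message
 * embeds a checksum field whose current value 0x0000000a is passed in, its
 * bytes (0x0a + 0x00 + 0x00 + 0x00) are subtracted again, giving 0x10, so
 * the result is independent of what the checksum field itself contains.
 */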
static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on the host if the bad page count exceeds
	 * 10, so allocating 512 entries is more than enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		if (bo) {
			amdgpu_bo_free_kernel(&bo, NULL, NULL);
			data->bps_bo[i] = bo;
		}
		data->last_reserved = i;
	}
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
				    struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}
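/*
 * Bad-page flow under SR-IOV (summary of the helpers around this point):
 * the host exposes retired page numbers in the PF2VF region;
 * amdgpu_virt_add_bad_page() copies each new record in via
 * amdgpu_virt_ras_add_bps(), and amdgpu_virt_ras_reserve_bps() below then
 * pins the corresponding VRAM page so the allocator can never hand it out.
 */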
static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error that should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (ttm_resource_manager_used(man)) {
			amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
						      bp << AMDGPU_GPU_PAGE_SHIFT,
						      AMDGPU_GPU_PAGE_SIZE);
			data->bps_bo[i] = NULL;
		} else {
			if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
						       AMDGPU_GPU_PAGE_SIZE,
						       &bo, NULL))
				DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
			data->bps_bo[i] = bo;
		}
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
					   uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
				     uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;
	void *vram_usage_va = NULL;

	if (adev->mman.fw_vram_usage_va)
		vram_usage_va = adev->mman.fw_vram_usage_va;
	else
		vram_usage_va = adev->mman.drv_vram_usage_va;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}
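/*
 * Sketch of the PF2VF handshake consumed below: the host periodically
 * rewrites the PF2VF block in the reserved VRAM region; the guest
 * re-reads it from the vf2pf work item
 * (amdgpu_virt_update_vf2pf_work_item) and, on repeated checksum or
 * size failures, escalates to a function-level reset via the reset
 * domain's flr_work.
 */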
static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;
	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size);
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			dev_err(adev->dev,
				"invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
				checksum, checkval);
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			dev_err(adev->dev,
				"invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
				checksum, checkval);
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
		return -EINVAL;
	}

	/* clamp too large or too small interval values */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}
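/*
 * For reference, one POPULATE_UCODE_INFO() invocation in the function
 * below expands to (illustrative):
 *
 *	vf2pf_info->ucode_info[AMD_SRIOV_UCODE_ID_VCE].id =
 *		AMD_SRIOV_UCODE_ID_VCE;
 *	vf2pf_info->ucode_info[AMD_SRIOV_UCODE_ID_VCE].version =
 *		adev->vce.fw_version;
 */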
static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
}

static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strcpy(vf2pf_info->driver_version, "N/A");

	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	/* framebuffer sizes/usage are reported in MB (bytes >> 20) */
	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
			vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}
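/*
 * Sketch of the steady-state exchange loop implemented below: every
 * vf2pf_update_interval_ms the work item re-reads the PF2VF block and,
 * if it validates, publishes fresh VF2PF data. The delayed work is
 * re-armed at the end of every pass, so the loop runs until
 * amdgpu_virt_fini_data_exchange() cancels it.
 */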
static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret) {
		adev->virt.vf2pf_update_retry_cnt++;
		if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
		    amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) {
			if (amdgpu_reset_domain_schedule(adev->reset_domain,
							 &adev->virt.flr_work))
				return;
			else
				dev_err(adev->dev, "Failed to queue work! at %s", __func__);
		}

		goto out;
	}

	adev->virt.vf2pf_update_retry_cnt = 0;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	schedule_delayed_work(&(adev->virt.vf2pf_work),
			      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;
	adev->virt.vf2pf_update_retry_cnt = 0;

	if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
		DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
	} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		/* go through this logic in ip_init and reset to init the workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work),
				      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this logic in the early init stage to get necessary
		 * flags, e.g. rlcg_acc related
		 */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		if (adev->mman.fw_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		} else if (adev->mman.drv_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		}

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}
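/*
 * Worked example for the offset reconstruction in
 * amdgpu_virt_exchange_data() above (a sketch; values are made up):
 * with bp_block_offset_low = 0x00001000 and bp_block_offset_high = 0x1,
 * the combined 64-bit offset is 0x0000000100001000.
 */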
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
	case CHIP_IP_DISCOVERY:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		/* passthrough mode excludes sriov mode */
		if (is_virtual_machine() && !xen_initial_domain())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
#ifdef CONFIG_X86
			/* don't send GPU_INIT_DATA with MS_HYPERV */
			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
#endif
				/* send a dummy GPU_INIT_DATA request to host on vega10 */
				amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_IP_DISCOVERY:
			nv_set_virt_ops(adev);
			/* try to send a GPU_INIT_DATA request to the host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chips don't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}
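/*
 * Bit semantics used above (summary, as encoded by the host in the IOV
 * function identifier register): bit 0 marks this function as a VF,
 * bit 31 marks SR-IOV as enabled. A reading of zero while running in a
 * VM (excluding Xen dom0) is treated as passthrough mode instead.
 */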
static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev) ? true : false;
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev) ? true : false;
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}

void amdgpu_virt_post_reset(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
		/* force set to GFXOFF state after reset,
		 * to avoid some invalid operation before GC enable
		 */
		adev->gfx.is_poweron = false;
	}
}

bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
		/* no vf autoload, white list */
		if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
		    ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
		/* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
		if (ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	case IP_VERSION(13, 0, 10):
		/* white list */
		if (ucode_id == AMDGPU_UCODE_ID_CAP
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_MES
		    || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
		    || ucode_id == AMDGPU_UCODE_ID_CP_MES1
		    || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
		    || ucode_id == AMDGPU_UCODE_ID_VCN1
		    || ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	default:
		/* legacy black list */
		if (ucode_id == AMDGPU_UCODE_ID_SDMA0
		    || ucode_id == AMDGPU_UCODE_ID_SDMA1
		    || ucode_id == AMDGPU_UCODE_ID_SDMA2
		    || ucode_id == AMDGPU_UCODE_ID_SDMA3
		    || ucode_id == AMDGPU_UCODE_ID_SDMA4
		    || ucode_id == AMDGPU_UCODE_ID_SDMA5
		    || ucode_id == AMDGPU_UCODE_ID_SDMA6
		    || ucode_id == AMDGPU_UCODE_ID_SDMA7
		    || ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	}
}
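/*
 * Usage sketch for the check above (illustrative; the actual caller lives
 * in the PSP firmware-load path): a true return means "the host loads
 * this firmware for the VF, skip it on the guest side", e.g.
 *
 *	if (amdgpu_sriov_vf(adev) &&
 *	    amdgpu_virt_fw_load_skip_check(adev, ucode_id))
 *		continue;
 */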
void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
	struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
	struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}

bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
					  u32 acc_flags, u32 hwip,
					  bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* only in the new version are AMDGPU_REGS_NO_KIQ and
		 * AMDGPU_REGS_RLC enabled simultaneously
		 */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
			   !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}
	return ret;
}
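/*
 * RLCG indirect access protocol implemented below (summary):
 * SCRATCH_REG0 carries the data value, SCRATCH_REG1 carries the dword
 * address plus command/status bits, and SPARE_INT (when present) kicks
 * the RLC firmware; completion and error status are then polled on
 * SCRATCH_REG1.
 */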
u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	void *scratch_reg0;
	void *scratch_reg1;
	void *scratch_reg2;
	void *scratch_reg3;
	void *spare_int = NULL;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect registers access through rlcg is not available\n");
		return 0;
	}

	if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
		dev_err(adev->dev, "invalid xcc\n");
		return 0;
	}

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0		= read/write value
		 * SCRATCH_REG1[30:28]	= command
		 * SCRATCH_REG1[19:0]	= address in dword
		 * SCRATCH_REG1[27:24]	= error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		tmp = readl(scratch_reg1);
		if (i >= timeout || (tmp & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) != 0) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);
	return ret;
}

void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip, u32 xcc_id)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}

u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}

bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
{
	bool xnack_mode = true;

	if (amdgpu_sriov_vf(adev) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
		xnack_mode = false;

	return xnack_mode;
}