/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
	struct atom_firmware_info_v3_5 v35;
};

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware_capability field of the firmwareinfo table on
 * success, or 0 if the table is unavailable.
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	u32 fw_cap = 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1 + */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
		}
	}

	return fw_cap;
}

/*
 * Helper function to query gpu virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if gpu virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}
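/*
 * Illustrative sketch (not compiled): callers are assumed to cache the
 * capability word once and then test individual ATOM_FIRMWARE_CAP_* bits
 * from the cached copy, which is what adev->mode_info.firmware_flags holds
 * for the helpers in this file:
 *
 *	u32 caps = amdgpu_atomfirmware_query_firmware_capability(adev);
 *
 *	if (caps & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
 *		;	// vbios reports virtualization support
 */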
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
	struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
{
	u32 start_addr, fw_size, drv_size;

	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);

	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
		  start_addr,
		  fw_size,
		  drv_size);

	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
	    (u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
		  ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
		/* Firmware requests VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
		/* Use the default scratch size */
		*usage_bytes = 0;
	} else {
		*usage_bytes = drv_size << 10;
	}
	return 0;
}

static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
	struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
{
	u32 fw_start_addr, fw_size, drv_start_addr, drv_size;

	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);

	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);

	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
		  fw_start_addr,
		  fw_size,
		  drv_start_addr,
		  drv_size);

	if (amdgpu_sriov_vf(adev) &&
	    ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Firmware requests VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
	}

	if (amdgpu_sriov_vf(adev) &&
	    ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
				ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Driver requests VRAM reservation for SR-IOV */
		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.drv_vram_usage_size = drv_size << 10;
	}

	*usage_bytes = 0;
	return 0;
}
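/*
 * Worked example (illustrative only, not compiled): the *_start_address_in_kb
 * words pack operation flags into the bits covered by
 * ATOM_VRAM_OPERATION_FLAGS_MASK; the remaining bits give the region offset
 * in KB. Assuming a hypothetical raw value whose address bits hold 0x100
 * (256 KB):
 *
 *	offset_bytes = (raw & ~ATOM_VRAM_OPERATION_FLAGS_MASK) << 10;
 *	// 0x100 KB << 10 == 0x40000 bytes (256 KiB)
 *
 * The "<< 10" converts the table's KB units into bytes.
 */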
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	u16 data_offset;
	u8 frev, crev;
	int usage_bytes = 0;

	/* Skip atomfirmware allocation for SR-IOV VFs when the dynamic
	 * critical region is enabled
	 */
	if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) {
		if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
			if (frev == 2 && crev == 1) {
				fw_usage_v2_1 =
					(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
				amdgpu_atomfirmware_allocate_fb_v2_1(adev,
								     fw_usage_v2_1,
								     &usage_bytes);
			} else if (frev >= 2 && crev >= 2) {
				fw_usage_v2_2 =
					(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
				amdgpu_atomfirmware_allocate_fb_v2_2(adev,
								     fw_usage_v2_2,
								     &usage_bytes);
			}
		}
	}

	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
	struct atom_integrated_system_info_v2_3 v23;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
	struct atom_umc_info_v4_0 v40;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};

static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
		case ATOM_DGPU_VRAM_TYPE_HBM3:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM3E:
			vram_type = AMDGPU_VRAM_TYPE_HBM3E;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}
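/*
 * Illustrative mapping examples (not compiled): the conversion depends on
 * whether the chip is an APU (system-memory types) or a dGPU (dedicated
 * VRAM types):
 *
 *	// APU reporting LPDDR5 system memory:
 *	convert_atom_mem_type_to_vram_type(adev, LpDdr5MemType);
 *	//	-> AMDGPU_VRAM_TYPE_LPDDR5
 *
 *	// dGPU: any HBM2/HBM2E/HBM3 stack collapses to one driver type:
 *	convert_atom_mem_type_to_vram_type(adev, ATOM_DGPU_VRAM_TYPE_HBM2E);
 *	//	-> AMDGPU_VRAM_TYPE_HBM
 */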
Options will not be parsed"); 315 return -EINVAL; 316 } 317 318 uma_info->num_entries = nr_uma_options; 319 uma_info->uma_option_index = igp_info->v23.UMACarveoutIndex; 320 321 opts = igp_info->v23.UMASizeControlOption; 322 323 for (i = 0; i < nr_uma_options; i++) { 324 if (!opts[i].memoryCarvedGb) 325 uma_info->entries[i].memory_carved_mb = 512; 326 else 327 uma_info->entries[i].memory_carved_mb = (uint32_t)opts[i].memoryCarvedGb << 10; 328 329 uma_info->entries[i].flags = opts[i].uma_carveout_option_flags.all8; 330 strscpy(uma_info->entries[i].name, opts[i].optionName, MAX_UMA_OPTION_NAME); 331 } 332 333 return 0; 334 } 335 336 int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev, 337 struct amdgpu_uma_carveout_info *uma_info) 338 { 339 struct amdgpu_mode_info *mode_info = &adev->mode_info; 340 union igp_info *igp_info; 341 u16 data_offset, size; 342 u8 frev, crev; 343 int index; 344 345 if (!(adev->flags & AMD_IS_APU)) 346 return -ENODEV; 347 348 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 349 integratedsysteminfo); 350 351 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, 352 index, &size, 353 &frev, &crev, &data_offset)) { 354 return -EINVAL; 355 } 356 357 igp_info = (union igp_info *) 358 (mode_info->atom_context->bios + data_offset); 359 360 switch (frev) { 361 case 2: 362 switch (crev) { 363 case 3: 364 return amdgpu_atomfirmware_get_uma_carveout_info_v2_3(adev, igp_info, uma_info); 365 break; 366 default: 367 break; 368 } 369 break; 370 default: 371 break; 372 } 373 return -ENODEV; 374 } 375 376 int 377 amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, 378 int *vram_width, int *vram_type, 379 int *vram_vendor) 380 { 381 struct amdgpu_mode_info *mode_info = &adev->mode_info; 382 int index, i = 0; 383 u16 data_offset, size; 384 union igp_info *igp_info; 385 union vram_info *vram_info; 386 union umc_info *umc_info; 387 union vram_module *vram_module; 388 u8 frev, crev; 389 u8 mem_type; 390 u8 mem_vendor; 391 u32 mem_channel_number; 392 u32 mem_channel_width; 393 u32 module_id; 394 395 if (adev->flags & AMD_IS_APU) 396 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 397 integratedsysteminfo); 398 else { 399 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 400 case IP_VERSION(12, 0, 0): 401 case IP_VERSION(12, 0, 1): 402 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info); 403 break; 404 default: 405 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info); 406 } 407 } 408 if (amdgpu_atom_parse_data_header(mode_info->atom_context, 409 index, &size, 410 &frev, &crev, &data_offset)) { 411 if (adev->flags & AMD_IS_APU) { 412 igp_info = (union igp_info *) 413 (mode_info->atom_context->bios + data_offset); 414 switch (frev) { 415 case 1: 416 switch (crev) { 417 case 11: 418 case 12: 419 mem_channel_number = igp_info->v11.umachannelnumber; 420 if (!mem_channel_number) 421 mem_channel_number = 1; 422 mem_type = igp_info->v11.memorytype; 423 if (mem_type == LpDdr5MemType) 424 mem_channel_width = 32; 425 else 426 mem_channel_width = 64; 427 if (vram_width) 428 *vram_width = mem_channel_number * mem_channel_width; 429 if (vram_type) 430 *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 431 break; 432 default: 433 return -EINVAL; 434 } 435 break; 436 case 2: 437 switch (crev) { 438 case 1: 439 case 2: 440 mem_channel_number = igp_info->v21.umachannelnumber; 441 if (!mem_channel_number) 442 mem_channel_number = 1; 443 mem_type = 
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union umc_info *umc_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
			index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
			break;
		default:
			index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
		}
	}
	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v11.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v21.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				case 3:
					mem_channel_number = igp_info->v23.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v23.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
			case IP_VERSION(12, 0, 0):
			case IP_VERSION(12, 0, 1):
				umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);

				if (frev == 4) {
					switch (crev) {
					case 0:
						mem_channel_number = le32_to_cpu(umc_info->v40.channel_num);
						mem_type = le32_to_cpu(umc_info->v40.vram_type);
						mem_channel_width = le32_to_cpu(umc_info->v40.channel_width);
						mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						break;
					default:
						return -EINVAL;
					}
				} else
					return -EINVAL;
				break;
			default:
				vram_info = (union vram_info *)
					(mode_info->atom_context->bios + data_offset);

				module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
				if (frev == 3) {
					switch (crev) {
					/* v30 */
					case 0:
						vram_module = (union vram_module *)vram_info->v30.vram_module;
						mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						mem_type = vram_info->v30.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_info->v30.channel_num;
						mem_channel_width = vram_info->v30.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * 16;
						break;
					default:
						return -EINVAL;
					}
				} else if (frev == 2) {
					switch (crev) {
					/* v23 */
					case 3:
						if (module_id > vram_info->v23.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v23.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v9.vram_module_size);
							i++;
						}
						mem_type = vram_module->v9.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v9.channel_num;
						mem_channel_width = vram_module->v9.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v24 */
					case 4:
						if (module_id > vram_info->v24.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v24.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v10.vram_module_size);
							i++;
						}
						mem_type = vram_module->v10.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v10.channel_num;
						mem_channel_width = vram_module->v10.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v25 */
					case 5:
						if (module_id > vram_info->v25.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v25.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v11.vram_module_size);
							i++;
						}
						mem_type = vram_module->v11.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v11.channel_num;
						mem_channel_width = vram_module->v11.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v26 */
					case 6:
						if (module_id > vram_info->v26.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v26.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v9.vram_module_size);
							i++;
						}
						mem_type = vram_module->v9.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v9.channel_num;
						mem_channel_width = vram_module->v9.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					default:
						return -EINVAL;
					}
				} else {
					/* invalid frev */
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
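/*
 * Worked example (illustrative only, not compiled): channel_width from the
 * vram_info/umc_info tables is treated as log2 of the per-channel bus width,
 * so the total width computed above is
 *
 *	*vram_width = mem_channel_number * (1 << mem_channel_width);
 *	// e.g. hypothetical 8 channels * (1 << 5) = 8 * 32 = 256 bits
 *
 * except for vram_info v3.0, where each channel is taken as 16 bits wide.
 */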
/*
 * Return true if the vbios enabled ecc by default (when the umc info table
 * is available), or false if ecc is not enabled or the umc info table is
 * not available
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool mem_ecc_enabled = false;
	u32 umc_config;
	u32 umc_config1;

	adev->ras_default_ecc_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
		if (frev == 3) {
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				mem_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				mem_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				mem_ecc_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				adev->ras_default_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else if (frev == 4) {
			switch (crev) {
			case 0:
				umc_config = le32_to_cpu(umc_info->v40.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
				mem_ecc_enabled =
					(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
				adev->ras_default_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else {
			/* unsupported frev */
			return false;
		}
	}

	return mem_ecc_enabled;
}
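/*
 * Illustrative restatement (not compiled): for umc_info v3.3+ the vbios
 * reports two config words, and the helper above distinguishes "ECC capable"
 * from "ECC enabled by default":
 *
 *	capable    = (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
 *		     (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE);
 *	default_on = umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE;
 */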
/*
 * Helper function to query sram ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *    the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}
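/*
 * Illustrative only (not compiled): serial-EEPROM I2C addresses carry the
 * device-type qualifier 1010b in their top four bits, so a populated field
 * is always non-zero, which is why the non-zero check above is safe today.
 * Hypothetical value:
 *
 *	u8 addr;
 *
 *	if (amdgpu_atomfirmware_ras_rom_addr(adev, &addr))
 *		;	// addr might read back as e.g. 0xA0 (1010 0000b)
 */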
union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};

union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};

int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/* if asic is Navi+, the rlc reference clock is used for system clock
	 * from vbios gfx_info table
	 */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
						  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				BUG();
			}
		}
	}

	return ret;
}
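/*
 * Worked example (illustrative only): the bootup clocks are reported in
 * units of 10 kHz, so a table value of 100000 corresponds to
 *
 *	100000 * 10 kHz = 1,000,000 kHz = 1 GHz
 */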
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}
	}
	return -EINVAL;
}
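/*
 * Illustrative only (not compiled): the per-SE fields parsed above combine
 * into the chip's theoretical CU count:
 *
 *	total_cu = max_shader_engines * max_sh_per_se * max_cu_per_sh;
 *	// e.g. hypothetical 2 SEs * 2 SHs/SE * 10 CUs/SH = 40 CUs
 */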
/*
 * Helper function to query two stage mem training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two stage mem training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}

int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
					   &frev, &crev, &data_offset))
		/* failed to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	case 5:
		fw_reserved_fb_size =
			(firmware_info->v35.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}
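/*
 * Worked example (illustrative only): fw_reserved_size_in_kb is a KB count,
 * so the byte size returned above is
 *
 *	fw_reserved_fb_size = fw_reserved_size_in_kb << 10;	// 2048 KB -> 2 MiB
 */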
/*
 * Helper function to execute asic_init table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether fb is reset or not
 *
 * Return 0 on success, otherwise a negative error code
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}

	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
					 sizeof(asic_init_ps_v2_1));
}
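/*
 * Illustrative call sketch (not compiled): assuming a flow where VRAM
 * contents survived (fb not reset), passing fb_reset = false makes the table
 * exit DRAM self-refresh instead of reinitializing memory:
 *
 *	amdgpu_atomfirmware_asic_init(adev, false);
 *	// memflag = b3DRAM_SELF_REFRESH_EXIT per the mapping above
 */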