1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * 23 */ 24 #include <linux/list.h> 25 #include <linux/slab.h> 26 #include <linux/pci.h> 27 #include <linux/acpi.h> 28 #include <drm/drmP.h> 29 #include <linux/firmware.h> 30 #include <drm/amdgpu_drm.h> 31 #include "amdgpu.h" 32 #include "cgs_linux.h" 33 #include "atom.h" 34 #include "amdgpu_ucode.h" 35 36 struct amdgpu_cgs_device { 37 struct cgs_device base; 38 struct amdgpu_device *adev; 39 }; 40 41 #define CGS_FUNC_ADEV \ 42 struct amdgpu_device *adev = \ 43 ((struct amdgpu_cgs_device *)cgs_device)->adev 44 45 static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type, 46 uint64_t *mc_start, uint64_t *mc_size, 47 uint64_t *mem_size) 48 { 49 CGS_FUNC_ADEV; 50 switch(type) { 51 case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: 52 case CGS_GPU_MEM_TYPE__VISIBLE_FB: 53 *mc_start = 0; 54 *mc_size = adev->mc.visible_vram_size; 55 *mem_size = adev->mc.visible_vram_size - adev->vram_pin_size; 56 break; 57 case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: 58 case CGS_GPU_MEM_TYPE__INVISIBLE_FB: 59 *mc_start = adev->mc.visible_vram_size; 60 *mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size; 61 *mem_size = *mc_size; 62 break; 63 case CGS_GPU_MEM_TYPE__GART_CACHEABLE: 64 case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE: 65 *mc_start = adev->mc.gtt_start; 66 *mc_size = adev->mc.gtt_size; 67 *mem_size = adev->mc.gtt_size - adev->gart_pin_size; 68 break; 69 default: 70 return -EINVAL; 71 } 72 73 return 0; 74 } 75 76 static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem, 77 uint64_t size, 78 uint64_t min_offset, uint64_t max_offset, 79 cgs_handle_t *kmem_handle, uint64_t *mcaddr) 80 { 81 CGS_FUNC_ADEV; 82 int ret; 83 struct amdgpu_bo *bo; 84 struct page *kmem_page = vmalloc_to_page(kmem); 85 int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT; 86 87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); 88 ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, 89 AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo); 90 if (ret) 91 return ret; 92 ret = amdgpu_bo_reserve(bo, false); 93 if (unlikely(ret != 0)) 94 return ret; 95 96 /* pin buffer into GTT */ 97 ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT, 98 min_offset, max_offset, mcaddr); 99 amdgpu_bo_unreserve(bo); 100 101 *kmem_handle = (cgs_handle_t)bo; 102 return ret; 103 } 104 105 static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle) 106 { 107 struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle; 108 109 if (obj) { 110 int r = amdgpu_bo_reserve(obj, false); 111 if (likely(r == 0)) { 112 amdgpu_bo_unpin(obj); 113 amdgpu_bo_unreserve(obj); 114 } 115 amdgpu_bo_unref(&obj); 116 117 } 118 return 0; 119 } 120 121 static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, 122 enum cgs_gpu_mem_type type, 123 uint64_t size, uint64_t align, 124 uint64_t min_offset, uint64_t max_offset, 125 cgs_handle_t *handle) 126 { 127 CGS_FUNC_ADEV; 128 uint16_t flags = 0; 129 int ret = 0; 130 uint32_t domain = 0; 131 struct amdgpu_bo *obj; 132 struct ttm_placement placement; 133 struct ttm_place place; 134 135 if (min_offset > max_offset) { 136 BUG_ON(1); 137 return -EINVAL; 138 } 139 140 /* fail if the alignment is not a power of 2 */ 141 if (((align != 1) && (align & (align - 1))) 142 || size == 0 || align == 0) 143 return -EINVAL; 144 145 146 switch(type) { 147 case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: 148 case CGS_GPU_MEM_TYPE__VISIBLE_FB: 149 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 150 domain = AMDGPU_GEM_DOMAIN_VRAM; 151 if (max_offset > adev->mc.real_vram_size) 152 return -EINVAL; 153 place.fpfn = min_offset >> PAGE_SHIFT; 154 place.lpfn = max_offset >> PAGE_SHIFT; 155 place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 156 TTM_PL_FLAG_VRAM; 157 break; 158 case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: 159 case CGS_GPU_MEM_TYPE__INVISIBLE_FB: 160 flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS; 161 domain = AMDGPU_GEM_DOMAIN_VRAM; 162 if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { 163 place.fpfn = 164 max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT; 165 place.lpfn = 166 min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT; 167 place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 168 TTM_PL_FLAG_VRAM; 169 } 170 171 break; 172 case CGS_GPU_MEM_TYPE__GART_CACHEABLE: 173 domain = AMDGPU_GEM_DOMAIN_GTT; 174 place.fpfn = min_offset >> PAGE_SHIFT; 175 place.lpfn = max_offset >> PAGE_SHIFT; 176 place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; 177 break; 178 case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE: 179 flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; 180 domain = AMDGPU_GEM_DOMAIN_GTT; 181 place.fpfn = min_offset >> PAGE_SHIFT; 182 place.lpfn = max_offset >> PAGE_SHIFT; 183 place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | 184 TTM_PL_FLAG_UNCACHED; 185 break; 186 default: 187 return -EINVAL; 188 } 189 190 191 *handle = 0; 192 193 placement.placement = &place; 194 placement.num_placement = 1; 195 placement.busy_placement = &place; 196 placement.num_busy_placement = 1; 197 198 ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, 199 true, domain, flags, 200 NULL, &placement, NULL, 201 &obj); 202 if (ret) { 203 DRM_ERROR("(%d) bo create failed\n", ret); 204 return ret; 205 } 206 *handle = (cgs_handle_t)obj; 207 208 return ret; 209 } 210 211 static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle) 212 { 213 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; 214 215 if (obj) { 216 int r = amdgpu_bo_reserve(obj, false); 217 if (likely(r == 0)) { 218 amdgpu_bo_kunmap(obj); 219 amdgpu_bo_unpin(obj); 220 amdgpu_bo_unreserve(obj); 221 } 222 amdgpu_bo_unref(&obj); 223 224 } 225 return 0; 226 } 227 228 static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle, 229 uint64_t *mcaddr) 230 { 231 int r; 232 u64 min_offset, max_offset; 233 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; 234 235 WARN_ON_ONCE(obj->placement.num_placement > 1); 236 237 min_offset = obj->placements[0].fpfn << PAGE_SHIFT; 238 max_offset = obj->placements[0].lpfn << PAGE_SHIFT; 239 240 r = amdgpu_bo_reserve(obj, false); 241 if (unlikely(r != 0)) 242 return r; 243 r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT, 244 min_offset, max_offset, mcaddr); 245 amdgpu_bo_unreserve(obj); 246 return r; 247 } 248 249 static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle) 250 { 251 int r; 252 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; 253 r = amdgpu_bo_reserve(obj, false); 254 if (unlikely(r != 0)) 255 return r; 256 r = amdgpu_bo_unpin(obj); 257 amdgpu_bo_unreserve(obj); 258 return r; 259 } 260 261 static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle, 262 void **map) 263 { 264 int r; 265 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; 266 r = amdgpu_bo_reserve(obj, false); 267 if (unlikely(r != 0)) 268 return r; 269 r = amdgpu_bo_kmap(obj, map); 270 amdgpu_bo_unreserve(obj); 271 return r; 272 } 273 274 static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle) 275 { 276 int r; 277 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; 278 r = amdgpu_bo_reserve(obj, false); 279 if (unlikely(r != 0)) 280 return r; 281 amdgpu_bo_kunmap(obj); 282 amdgpu_bo_unreserve(obj); 283 return r; 284 } 285 286 static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset) 287 { 288 CGS_FUNC_ADEV; 289 return RREG32(offset); 290 } 291 292 static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset, 293 uint32_t value) 294 { 295 CGS_FUNC_ADEV; 296 WREG32(offset, value); 297 } 298 299 static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, 300 enum cgs_ind_reg space, 301 unsigned index) 302 { 303 CGS_FUNC_ADEV; 304 switch (space) { 305 case CGS_IND_REG__MMIO: 306 return RREG32_IDX(index); 307 case CGS_IND_REG__PCIE: 308 return RREG32_PCIE(index); 309 case CGS_IND_REG__SMC: 310 return RREG32_SMC(index); 311 case CGS_IND_REG__UVD_CTX: 312 return RREG32_UVD_CTX(index); 313 case CGS_IND_REG__DIDT: 314 return RREG32_DIDT(index); 315 case CGS_IND_REG_GC_CAC: 316 return RREG32_GC_CAC(index); 317 case CGS_IND_REG__AUDIO_ENDPT: 318 DRM_ERROR("audio endpt register access not implemented.\n"); 319 return 0; 320 } 321 WARN(1, "Invalid indirect register space"); 322 return 0; 323 } 324 325 static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, 326 enum cgs_ind_reg space, 327 unsigned index, uint32_t value) 328 { 329 CGS_FUNC_ADEV; 330 switch (space) { 331 case CGS_IND_REG__MMIO: 332 return WREG32_IDX(index, value); 333 case CGS_IND_REG__PCIE: 334 return WREG32_PCIE(index, value); 335 case CGS_IND_REG__SMC: 336 return WREG32_SMC(index, value); 337 case CGS_IND_REG__UVD_CTX: 338 return WREG32_UVD_CTX(index, value); 339 case CGS_IND_REG__DIDT: 340 return WREG32_DIDT(index, value); 341 case CGS_IND_REG_GC_CAC: 342 return WREG32_GC_CAC(index, value); 343 case CGS_IND_REG__AUDIO_ENDPT: 344 DRM_ERROR("audio endpt register access not implemented.\n"); 345 return; 346 } 347 WARN(1, "Invalid indirect register space"); 348 } 349 350 static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr) 351 { 352 CGS_FUNC_ADEV; 353 uint8_t val; 354 int ret = pci_read_config_byte(adev->pdev, addr, &val); 355 if (WARN(ret, "pci_read_config_byte error")) 356 return 0; 357 return val; 358 } 359 360 static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr) 361 { 362 CGS_FUNC_ADEV; 363 uint16_t val; 364 int ret = pci_read_config_word(adev->pdev, addr, &val); 365 if (WARN(ret, "pci_read_config_word error")) 366 return 0; 367 return val; 368 } 369 370 static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device, 371 unsigned addr) 372 { 373 CGS_FUNC_ADEV; 374 uint32_t val; 375 int ret = pci_read_config_dword(adev->pdev, addr, &val); 376 if (WARN(ret, "pci_read_config_dword error")) 377 return 0; 378 return val; 379 } 380 381 static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr, 382 uint8_t value) 383 { 384 CGS_FUNC_ADEV; 385 int ret = pci_write_config_byte(adev->pdev, addr, value); 386 WARN(ret, "pci_write_config_byte error"); 387 } 388 389 static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr, 390 uint16_t value) 391 { 392 CGS_FUNC_ADEV; 393 int ret = pci_write_config_word(adev->pdev, addr, value); 394 WARN(ret, "pci_write_config_word error"); 395 } 396 397 static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr, 398 uint32_t value) 399 { 400 CGS_FUNC_ADEV; 401 int ret = pci_write_config_dword(adev->pdev, addr, value); 402 WARN(ret, "pci_write_config_dword error"); 403 } 404 405 406 static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device, 407 enum cgs_resource_type resource_type, 408 uint64_t size, 409 uint64_t offset, 410 uint64_t *resource_base) 411 { 412 CGS_FUNC_ADEV; 413 414 if (resource_base == NULL) 415 return -EINVAL; 416 417 switch (resource_type) { 418 case CGS_RESOURCE_TYPE_MMIO: 419 if (adev->rmmio_size == 0) 420 return -ENOENT; 421 if ((offset + size) > adev->rmmio_size) 422 return -EINVAL; 423 *resource_base = adev->rmmio_base; 424 return 0; 425 case CGS_RESOURCE_TYPE_DOORBELL: 426 if (adev->doorbell.size == 0) 427 return -ENOENT; 428 if ((offset + size) > adev->doorbell.size) 429 return -EINVAL; 430 *resource_base = adev->doorbell.base; 431 return 0; 432 case CGS_RESOURCE_TYPE_FB: 433 case CGS_RESOURCE_TYPE_IO: 434 case CGS_RESOURCE_TYPE_ROM: 435 default: 436 return -EINVAL; 437 } 438 } 439 440 static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device, 441 unsigned table, uint16_t *size, 442 uint8_t *frev, uint8_t *crev) 443 { 444 CGS_FUNC_ADEV; 445 uint16_t data_start; 446 447 if (amdgpu_atom_parse_data_header( 448 adev->mode_info.atom_context, table, size, 449 frev, crev, &data_start)) 450 return (uint8_t*)adev->mode_info.atom_context->bios + 451 data_start; 452 453 return NULL; 454 } 455 456 static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table, 457 uint8_t *frev, uint8_t *crev) 458 { 459 CGS_FUNC_ADEV; 460 461 if (amdgpu_atom_parse_cmd_header( 462 adev->mode_info.atom_context, table, 463 frev, crev)) 464 return 0; 465 466 return -EINVAL; 467 } 468 469 static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table, 470 void *args) 471 { 472 CGS_FUNC_ADEV; 473 474 return amdgpu_atom_execute_table( 475 adev->mode_info.atom_context, table, args); 476 } 477 478 static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request) 479 { 480 /* TODO */ 481 return 0; 482 } 483 484 static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request) 485 { 486 /* TODO */ 487 return 0; 488 } 489 490 static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request, 491 int active) 492 { 493 /* TODO */ 494 return 0; 495 } 496 497 static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request, 498 enum cgs_clock clock, unsigned freq) 499 { 500 /* TODO */ 501 return 0; 502 } 503 504 static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request, 505 enum cgs_engine engine, int powered) 506 { 507 /* TODO */ 508 return 0; 509 } 510 511 512 513 static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device, 514 enum cgs_clock clock, 515 struct cgs_clock_limits *limits) 516 { 517 /* TODO */ 518 return 0; 519 } 520 521 static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask, 522 const uint32_t *voltages) 523 { 524 DRM_ERROR("not implemented"); 525 return -EPERM; 526 } 527 528 struct cgs_irq_params { 529 unsigned src_id; 530 cgs_irq_source_set_func_t set; 531 cgs_irq_handler_func_t handler; 532 void *private_data; 533 }; 534 535 static int cgs_set_irq_state(struct amdgpu_device *adev, 536 struct amdgpu_irq_src *src, 537 unsigned type, 538 enum amdgpu_interrupt_state state) 539 { 540 struct cgs_irq_params *irq_params = 541 (struct cgs_irq_params *)src->data; 542 if (!irq_params) 543 return -EINVAL; 544 if (!irq_params->set) 545 return -EINVAL; 546 return irq_params->set(irq_params->private_data, 547 irq_params->src_id, 548 type, 549 (int)state); 550 } 551 552 static int cgs_process_irq(struct amdgpu_device *adev, 553 struct amdgpu_irq_src *source, 554 struct amdgpu_iv_entry *entry) 555 { 556 struct cgs_irq_params *irq_params = 557 (struct cgs_irq_params *)source->data; 558 if (!irq_params) 559 return -EINVAL; 560 if (!irq_params->handler) 561 return -EINVAL; 562 return irq_params->handler(irq_params->private_data, 563 irq_params->src_id, 564 entry->iv_entry); 565 } 566 567 static const struct amdgpu_irq_src_funcs cgs_irq_funcs = { 568 .set = cgs_set_irq_state, 569 .process = cgs_process_irq, 570 }; 571 572 static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id, 573 unsigned num_types, 574 cgs_irq_source_set_func_t set, 575 cgs_irq_handler_func_t handler, 576 void *private_data) 577 { 578 CGS_FUNC_ADEV; 579 int ret = 0; 580 struct cgs_irq_params *irq_params; 581 struct amdgpu_irq_src *source = 582 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); 583 if (!source) 584 return -ENOMEM; 585 irq_params = 586 kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL); 587 if (!irq_params) { 588 kfree(source); 589 return -ENOMEM; 590 } 591 source->num_types = num_types; 592 source->funcs = &cgs_irq_funcs; 593 irq_params->src_id = src_id; 594 irq_params->set = set; 595 irq_params->handler = handler; 596 irq_params->private_data = private_data; 597 source->data = (void *)irq_params; 598 ret = amdgpu_irq_add_id(adev, src_id, source); 599 if (ret) { 600 kfree(irq_params); 601 kfree(source); 602 } 603 604 return ret; 605 } 606 607 static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type) 608 { 609 CGS_FUNC_ADEV; 610 return amdgpu_irq_get(adev, adev->irq.sources[src_id], type); 611 } 612 613 static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type) 614 { 615 CGS_FUNC_ADEV; 616 return amdgpu_irq_put(adev, adev->irq.sources[src_id], type); 617 } 618 619 int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, 620 enum amd_ip_block_type block_type, 621 enum amd_clockgating_state state) 622 { 623 CGS_FUNC_ADEV; 624 int i, r = -1; 625 626 for (i = 0; i < adev->num_ip_blocks; i++) { 627 if (!adev->ip_block_status[i].valid) 628 continue; 629 630 if (adev->ip_blocks[i].type == block_type) { 631 r = adev->ip_blocks[i].funcs->set_clockgating_state( 632 (void *)adev, 633 state); 634 break; 635 } 636 } 637 return r; 638 } 639 640 int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device, 641 enum amd_ip_block_type block_type, 642 enum amd_powergating_state state) 643 { 644 CGS_FUNC_ADEV; 645 int i, r = -1; 646 647 for (i = 0; i < adev->num_ip_blocks; i++) { 648 if (!adev->ip_block_status[i].valid) 649 continue; 650 651 if (adev->ip_blocks[i].type == block_type) { 652 r = adev->ip_blocks[i].funcs->set_powergating_state( 653 (void *)adev, 654 state); 655 break; 656 } 657 } 658 return r; 659 } 660 661 662 static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) 663 { 664 CGS_FUNC_ADEV; 665 enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; 666 667 switch (fw_type) { 668 case CGS_UCODE_ID_SDMA0: 669 result = AMDGPU_UCODE_ID_SDMA0; 670 break; 671 case CGS_UCODE_ID_SDMA1: 672 result = AMDGPU_UCODE_ID_SDMA1; 673 break; 674 case CGS_UCODE_ID_CP_CE: 675 result = AMDGPU_UCODE_ID_CP_CE; 676 break; 677 case CGS_UCODE_ID_CP_PFP: 678 result = AMDGPU_UCODE_ID_CP_PFP; 679 break; 680 case CGS_UCODE_ID_CP_ME: 681 result = AMDGPU_UCODE_ID_CP_ME; 682 break; 683 case CGS_UCODE_ID_CP_MEC: 684 case CGS_UCODE_ID_CP_MEC_JT1: 685 result = AMDGPU_UCODE_ID_CP_MEC1; 686 break; 687 case CGS_UCODE_ID_CP_MEC_JT2: 688 if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11 689 || adev->asic_type == CHIP_POLARIS10) 690 result = AMDGPU_UCODE_ID_CP_MEC2; 691 else 692 result = AMDGPU_UCODE_ID_CP_MEC1; 693 break; 694 case CGS_UCODE_ID_RLC_G: 695 result = AMDGPU_UCODE_ID_RLC_G; 696 break; 697 default: 698 DRM_ERROR("Firmware type not supported\n"); 699 } 700 return result; 701 } 702 703 static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) 704 { 705 CGS_FUNC_ADEV; 706 if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { 707 release_firmware(adev->pm.fw); 708 return 0; 709 } 710 /* cannot release other firmware because they are not created by cgs */ 711 return -EINVAL; 712 } 713 714 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, 715 enum cgs_ucode_id type, 716 struct cgs_firmware_info *info) 717 { 718 CGS_FUNC_ADEV; 719 720 if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) { 721 uint64_t gpu_addr; 722 uint32_t data_size; 723 const struct gfx_firmware_header_v1_0 *header; 724 enum AMDGPU_UCODE_ID id; 725 struct amdgpu_firmware_info *ucode; 726 727 id = fw_type_convert(cgs_device, type); 728 ucode = &adev->firmware.ucode[id]; 729 if (ucode->fw == NULL) 730 return -EINVAL; 731 732 gpu_addr = ucode->mc_addr; 733 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; 734 data_size = le32_to_cpu(header->header.ucode_size_bytes); 735 736 if ((type == CGS_UCODE_ID_CP_MEC_JT1) || 737 (type == CGS_UCODE_ID_CP_MEC_JT2)) { 738 gpu_addr += le32_to_cpu(header->jt_offset) << 2; 739 data_size = le32_to_cpu(header->jt_size) << 2; 740 } 741 info->mc_addr = gpu_addr; 742 info->image_size = data_size; 743 info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); 744 info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); 745 } else { 746 char fw_name[30] = {0}; 747 int err = 0; 748 uint32_t ucode_size; 749 uint32_t ucode_start_address; 750 const uint8_t *src; 751 const struct smc_firmware_header_v1_0 *hdr; 752 753 if (!adev->pm.fw) { 754 switch (adev->asic_type) { 755 case CHIP_TOPAZ: 756 strcpy(fw_name, "amdgpu/topaz_smc.bin"); 757 break; 758 case CHIP_TONGA: 759 strcpy(fw_name, "amdgpu/tonga_smc.bin"); 760 break; 761 case CHIP_FIJI: 762 strcpy(fw_name, "amdgpu/fiji_smc.bin"); 763 break; 764 case CHIP_POLARIS11: 765 if (type == CGS_UCODE_ID_SMU) 766 strcpy(fw_name, "amdgpu/polaris11_smc.bin"); 767 else if (type == CGS_UCODE_ID_SMU_SK) 768 strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); 769 break; 770 case CHIP_POLARIS10: 771 if (type == CGS_UCODE_ID_SMU) 772 strcpy(fw_name, "amdgpu/polaris10_smc.bin"); 773 else if (type == CGS_UCODE_ID_SMU_SK) 774 strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); 775 break; 776 default: 777 DRM_ERROR("SMC firmware not supported\n"); 778 return -EINVAL; 779 } 780 781 err = request_firmware(&adev->pm.fw, fw_name, adev->dev); 782 if (err) { 783 DRM_ERROR("Failed to request firmware\n"); 784 return err; 785 } 786 787 err = amdgpu_ucode_validate(adev->pm.fw); 788 if (err) { 789 DRM_ERROR("Failed to load firmware \"%s\"", fw_name); 790 release_firmware(adev->pm.fw); 791 adev->pm.fw = NULL; 792 return err; 793 } 794 } 795 796 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 797 amdgpu_ucode_print_smc_hdr(&hdr->header); 798 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); 799 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); 800 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); 801 src = (const uint8_t *)(adev->pm.fw->data + 802 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 803 804 info->version = adev->pm.fw_version; 805 info->image_size = ucode_size; 806 info->ucode_start_address = ucode_start_address; 807 info->kptr = (void *)src; 808 } 809 return 0; 810 } 811 812 static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, 813 struct cgs_system_info *sys_info) 814 { 815 CGS_FUNC_ADEV; 816 817 if (NULL == sys_info) 818 return -ENODEV; 819 820 if (sizeof(struct cgs_system_info) != sys_info->size) 821 return -ENODEV; 822 823 switch (sys_info->info_id) { 824 case CGS_SYSTEM_INFO_ADAPTER_BDF_ID: 825 sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8); 826 break; 827 case CGS_SYSTEM_INFO_PCIE_GEN_INFO: 828 sys_info->value = adev->pm.pcie_gen_mask; 829 break; 830 case CGS_SYSTEM_INFO_PCIE_MLW: 831 sys_info->value = adev->pm.pcie_mlw_mask; 832 break; 833 case CGS_SYSTEM_INFO_PCIE_DEV: 834 sys_info->value = adev->pdev->device; 835 break; 836 case CGS_SYSTEM_INFO_PCIE_REV: 837 sys_info->value = adev->pdev->revision; 838 break; 839 case CGS_SYSTEM_INFO_CG_FLAGS: 840 sys_info->value = adev->cg_flags; 841 break; 842 case CGS_SYSTEM_INFO_PG_FLAGS: 843 sys_info->value = adev->pg_flags; 844 break; 845 case CGS_SYSTEM_INFO_GFX_CU_INFO: 846 sys_info->value = adev->gfx.cu_info.number; 847 break; 848 case CGS_SYSTEM_INFO_GFX_SE_INFO: 849 sys_info->value = adev->gfx.config.max_shader_engines; 850 break; 851 default: 852 return -ENODEV; 853 } 854 855 return 0; 856 } 857 858 static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device, 859 struct cgs_display_info *info) 860 { 861 CGS_FUNC_ADEV; 862 struct amdgpu_crtc *amdgpu_crtc; 863 struct drm_device *ddev = adev->ddev; 864 struct drm_crtc *crtc; 865 uint32_t line_time_us, vblank_lines; 866 struct cgs_mode_info *mode_info; 867 868 if (info == NULL) 869 return -EINVAL; 870 871 mode_info = info->mode_info; 872 873 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 874 list_for_each_entry(crtc, 875 &ddev->mode_config.crtc_list, head) { 876 amdgpu_crtc = to_amdgpu_crtc(crtc); 877 if (crtc->enabled) { 878 info->active_display_mask |= (1 << amdgpu_crtc->crtc_id); 879 info->display_count++; 880 } 881 if (mode_info != NULL && 882 crtc->enabled && amdgpu_crtc->enabled && 883 amdgpu_crtc->hw_mode.clock) { 884 line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / 885 amdgpu_crtc->hw_mode.clock; 886 vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - 887 amdgpu_crtc->hw_mode.crtc_vdisplay + 888 (amdgpu_crtc->v_border * 2); 889 mode_info->vblank_time_us = vblank_lines * line_time_us; 890 mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 891 mode_info->ref_clock = adev->clock.spll.reference_freq; 892 mode_info = NULL; 893 } 894 } 895 } 896 897 return 0; 898 } 899 900 901 static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled) 902 { 903 CGS_FUNC_ADEV; 904 905 adev->pm.dpm_enabled = enabled; 906 907 return 0; 908 } 909 910 /** \brief evaluate acpi namespace object, handle or pathname must be valid 911 * \param cgs_device 912 * \param info input/output arguments for the control method 913 * \return status 914 */ 915 916 #if defined(CONFIG_ACPI) 917 static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, 918 struct cgs_acpi_method_info *info) 919 { 920 CGS_FUNC_ADEV; 921 acpi_handle handle; 922 struct acpi_object_list input; 923 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 924 union acpi_object *params, *obj; 925 uint8_t name[5] = {'\0'}; 926 struct cgs_acpi_method_argument *argument; 927 uint32_t i, count; 928 acpi_status status; 929 int result; 930 931 handle = ACPI_HANDLE(&adev->pdev->dev); 932 if (!handle) 933 return -ENODEV; 934 935 memset(&input, 0, sizeof(struct acpi_object_list)); 936 937 /* validate input info */ 938 if (info->size != sizeof(struct cgs_acpi_method_info)) 939 return -EINVAL; 940 941 input.count = info->input_count; 942 if (info->input_count > 0) { 943 if (info->pinput_argument == NULL) 944 return -EINVAL; 945 argument = info->pinput_argument; 946 for (i = 0; i < info->input_count; i++) { 947 if (((argument->type == ACPI_TYPE_STRING) || 948 (argument->type == ACPI_TYPE_BUFFER)) && 949 (argument->pointer == NULL)) 950 return -EINVAL; 951 argument++; 952 } 953 } 954 955 if (info->output_count > 0) { 956 if (info->poutput_argument == NULL) 957 return -EINVAL; 958 argument = info->poutput_argument; 959 for (i = 0; i < info->output_count; i++) { 960 if (((argument->type == ACPI_TYPE_STRING) || 961 (argument->type == ACPI_TYPE_BUFFER)) 962 && (argument->pointer == NULL)) 963 return -EINVAL; 964 argument++; 965 } 966 } 967 968 /* The path name passed to acpi_evaluate_object should be null terminated */ 969 if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) { 970 strncpy(name, (char *)&(info->name), sizeof(uint32_t)); 971 name[4] = '\0'; 972 } 973 974 /* parse input parameters */ 975 if (input.count > 0) { 976 input.pointer = params = 977 kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL); 978 if (params == NULL) 979 return -EINVAL; 980 981 argument = info->pinput_argument; 982 983 for (i = 0; i < input.count; i++) { 984 params->type = argument->type; 985 switch (params->type) { 986 case ACPI_TYPE_INTEGER: 987 params->integer.value = argument->value; 988 break; 989 case ACPI_TYPE_STRING: 990 params->string.length = argument->data_length; 991 params->string.pointer = argument->pointer; 992 break; 993 case ACPI_TYPE_BUFFER: 994 params->buffer.length = argument->data_length; 995 params->buffer.pointer = argument->pointer; 996 break; 997 default: 998 break; 999 } 1000 params++; 1001 argument++; 1002 } 1003 } 1004 1005 /* parse output info */ 1006 count = info->output_count; 1007 argument = info->poutput_argument; 1008 1009 /* evaluate the acpi method */ 1010 status = acpi_evaluate_object(handle, name, &input, &output); 1011 1012 if (ACPI_FAILURE(status)) { 1013 result = -EIO; 1014 goto free_input; 1015 } 1016 1017 /* return the output info */ 1018 obj = output.pointer; 1019 1020 if (count > 1) { 1021 if ((obj->type != ACPI_TYPE_PACKAGE) || 1022 (obj->package.count != count)) { 1023 result = -EIO; 1024 goto free_obj; 1025 } 1026 params = obj->package.elements; 1027 } else 1028 params = obj; 1029 1030 if (params == NULL) { 1031 result = -EIO; 1032 goto free_obj; 1033 } 1034 1035 for (i = 0; i < count; i++) { 1036 if (argument->type != params->type) { 1037 result = -EIO; 1038 goto free_obj; 1039 } 1040 switch (params->type) { 1041 case ACPI_TYPE_INTEGER: 1042 argument->value = params->integer.value; 1043 break; 1044 case ACPI_TYPE_STRING: 1045 if ((params->string.length != argument->data_length) || 1046 (params->string.pointer == NULL)) { 1047 result = -EIO; 1048 goto free_obj; 1049 } 1050 strncpy(argument->pointer, 1051 params->string.pointer, 1052 params->string.length); 1053 break; 1054 case ACPI_TYPE_BUFFER: 1055 if (params->buffer.pointer == NULL) { 1056 result = -EIO; 1057 goto free_obj; 1058 } 1059 memcpy(argument->pointer, 1060 params->buffer.pointer, 1061 argument->data_length); 1062 break; 1063 default: 1064 break; 1065 } 1066 argument++; 1067 params++; 1068 } 1069 1070 result = 0; 1071 free_obj: 1072 kfree(obj); 1073 free_input: 1074 kfree((void *)input.pointer); 1075 return result; 1076 } 1077 #else 1078 static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, 1079 struct cgs_acpi_method_info *info) 1080 { 1081 return -EIO; 1082 } 1083 #endif 1084 1085 static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, 1086 uint32_t acpi_method, 1087 uint32_t acpi_function, 1088 void *pinput, void *poutput, 1089 uint32_t output_count, 1090 uint32_t input_size, 1091 uint32_t output_size) 1092 { 1093 struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} }; 1094 struct cgs_acpi_method_argument acpi_output = {0}; 1095 struct cgs_acpi_method_info info = {0}; 1096 1097 acpi_input[0].type = CGS_ACPI_TYPE_INTEGER; 1098 acpi_input[0].data_length = sizeof(uint32_t); 1099 acpi_input[0].value = acpi_function; 1100 1101 acpi_input[1].type = CGS_ACPI_TYPE_BUFFER; 1102 acpi_input[1].data_length = input_size; 1103 acpi_input[1].pointer = pinput; 1104 1105 acpi_output.type = CGS_ACPI_TYPE_BUFFER; 1106 acpi_output.data_length = output_size; 1107 acpi_output.pointer = poutput; 1108 1109 info.size = sizeof(struct cgs_acpi_method_info); 1110 info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT; 1111 info.input_count = 2; 1112 info.name = acpi_method; 1113 info.pinput_argument = acpi_input; 1114 info.output_count = output_count; 1115 info.poutput_argument = &acpi_output; 1116 1117 return amdgpu_cgs_acpi_eval_object(cgs_device, &info); 1118 } 1119 1120 static const struct cgs_ops amdgpu_cgs_ops = { 1121 amdgpu_cgs_gpu_mem_info, 1122 amdgpu_cgs_gmap_kmem, 1123 amdgpu_cgs_gunmap_kmem, 1124 amdgpu_cgs_alloc_gpu_mem, 1125 amdgpu_cgs_free_gpu_mem, 1126 amdgpu_cgs_gmap_gpu_mem, 1127 amdgpu_cgs_gunmap_gpu_mem, 1128 amdgpu_cgs_kmap_gpu_mem, 1129 amdgpu_cgs_kunmap_gpu_mem, 1130 amdgpu_cgs_read_register, 1131 amdgpu_cgs_write_register, 1132 amdgpu_cgs_read_ind_register, 1133 amdgpu_cgs_write_ind_register, 1134 amdgpu_cgs_read_pci_config_byte, 1135 amdgpu_cgs_read_pci_config_word, 1136 amdgpu_cgs_read_pci_config_dword, 1137 amdgpu_cgs_write_pci_config_byte, 1138 amdgpu_cgs_write_pci_config_word, 1139 amdgpu_cgs_write_pci_config_dword, 1140 amdgpu_cgs_get_pci_resource, 1141 amdgpu_cgs_atom_get_data_table, 1142 amdgpu_cgs_atom_get_cmd_table_revs, 1143 amdgpu_cgs_atom_exec_cmd_table, 1144 amdgpu_cgs_create_pm_request, 1145 amdgpu_cgs_destroy_pm_request, 1146 amdgpu_cgs_set_pm_request, 1147 amdgpu_cgs_pm_request_clock, 1148 amdgpu_cgs_pm_request_engine, 1149 amdgpu_cgs_pm_query_clock_limits, 1150 amdgpu_cgs_set_camera_voltages, 1151 amdgpu_cgs_get_firmware_info, 1152 amdgpu_cgs_rel_firmware, 1153 amdgpu_cgs_set_powergating_state, 1154 amdgpu_cgs_set_clockgating_state, 1155 amdgpu_cgs_get_active_displays_info, 1156 amdgpu_cgs_notify_dpm_enabled, 1157 amdgpu_cgs_call_acpi_method, 1158 amdgpu_cgs_query_system_info, 1159 }; 1160 1161 static const struct cgs_os_ops amdgpu_cgs_os_ops = { 1162 amdgpu_cgs_add_irq_source, 1163 amdgpu_cgs_irq_get, 1164 amdgpu_cgs_irq_put 1165 }; 1166 1167 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) 1168 { 1169 struct amdgpu_cgs_device *cgs_device = 1170 kmalloc(sizeof(*cgs_device), GFP_KERNEL); 1171 1172 if (!cgs_device) { 1173 DRM_ERROR("Couldn't allocate CGS device structure\n"); 1174 return NULL; 1175 } 1176 1177 cgs_device->base.ops = &amdgpu_cgs_ops; 1178 cgs_device->base.os_ops = &amdgpu_cgs_os_ops; 1179 cgs_device->adev = adev; 1180 1181 return (struct cgs_device *)cgs_device; 1182 } 1183 1184 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device) 1185 { 1186 kfree(cgs_device); 1187 } 1188