/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
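	/*
	 * For illustration only (hypothetical numbers): with a 1024 MiB
	 * GTT of which 64 MiB is pinned, max_size is 960 MiB, so a
	 * 2048 MiB allocation request fails the check below with -ENOMEM.
	 */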
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}
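
/*
 * For illustration only: userspace reaches radeon_gem_set_domain() through
 * DRM_IOCTL_RADEON_GEM_SET_DOMAIN (see radeon_gem_set_domain_ioctl() below),
 * e.g. to wait for a BO to become idle before CPU access; "fd" and "handle"
 * are placeholders for a DRM file descriptor and a GEM handle:
 *
 *	struct drm_radeon_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = RADEON_GEM_DOMAIN_CPU,
 *		.write_domain = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_RADEON_GEM_SET_DOMAIN, &sd);
 */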

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
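
/*
 * For illustration only: a rough sketch of how userspace invokes
 * radeon_gem_create_ioctl() via DRM_IOCTL_RADEON_GEM_CREATE ("fd" is a
 * placeholder DRM file descriptor, error handling omitted):
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *		.flags = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req);
 *
 * On success, req.handle names the new BO.
 */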

int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}
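
/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: drm file the handle belongs to
 * @dev: drm device
 * @handle: GEM handle of the buffer object
 * @offset_p: filled with the offset to pass to mmap() on success
 *
 * Userptr BOs are rejected with -EPERM, since their backing pages are
 * already regular user memory.
 */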
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
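
/*
 * For illustration only: userspace maps a BO into its per-process GPU VM
 * through DRM_IOCTL_RADEON_GEM_VA, roughly like this ("fd", "handle" and
 * "gpu_addr" are placeholders; note the snooped flag expected by
 * radeon_gem_va_ioctl() below):
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = handle,
 *		.operation = RADEON_VA_MAP,
 *		.vm_id = 0,
 *		.flags = RADEON_VM_PAGE_READABLE |
 *			 RADEON_VM_PAGE_WRITEABLE |
 *			 RADEON_VM_PAGE_SNOOPED,
 *		.offset = gpu_addr,
 *	};
 *	ioctl(fd, DRM_IOCTL_RADEON_GEM_VA, &va);
 */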

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way, moving forward, we can use those fields without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove; we need to enforce that userspace sets the snooped
	 * flag, otherwise we will end up with broken userspace and we
	 * won't be able to enable this feature without adding a new
	 * interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
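
/*
 * For illustration (hypothetical values): a 1920x1080 dumb buffer at 32 bpp
 * gets pitch = radeon_align_pitch(rdev, 1920, 4, 0) bytes (4 = bpp / 8) and
 * size = ALIGN(pitch * 1080, PAGE_SIZE), then is allocated in VRAM below.
 */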
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}