/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			/* if VRAM is full, fall back to GTT and try again */
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
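/**
 * radeon_gem_set_domain - validate a GEM object into a read/write domain
 *
 * @gobj: GEM object to operate on
 * @rdomain: requested read domain
 * @wdomain: requested write domain
 *
 * Helper for the SET_DOMAIN ioctl. The write domain takes precedence
 * over the read domain; a CPU domain request just waits for the BO to
 * become idle.
 */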
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to become idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	/* per-file VMs need Cayman or newer and working acceleration */
	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
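/**
 * radeon_gem_handle_lockup - map a GPU lockup to an ioctl return code
 *
 * @rdev: radeon_device pointer
 * @r: error code returned by the failing operation
 *
 * A -EDEADLK return signals a GPU lockup; attempt a GPU reset and, if
 * it succeeds, return -EAGAIN so userspace retries the ioctl.
 */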
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
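/**
 * radeon_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: drm_device pointer
 * @data: ioctl args (struct drm_radeon_gem_userptr)
 * @filp: drm file descriptor
 *
 * Validates the flags, creates a CPU-domain BO, binds it to the user
 * pages and optionally registers an MMU notifier for it.
 */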
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	/* address and size must be page aligned */
	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}
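/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: drm file descriptor
 * @dev: drm_device pointer
 * @handle: GEM handle of the object
 * @offset_p: returned mmap offset
 *
 * Userptr BOs cannot be mapped through this path and return -EPERM.
 */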
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	/* non-blocking check whether all fences on the BO have signaled */
	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
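/**
 * radeon_gem_va_ioctl - map or unmap a BO in a per-file virtual address space
 *
 * @dev: drm_device pointer
 * @data: ioctl args (struct drm_radeon_gem_va)
 * @filp: drm file descriptor
 *
 * Validates the requested VA and flags, then maps or unmaps the BO in
 * the caller's VM and flushes the change via radeon_gem_va_update_vm().
 */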
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r) {
		radeon_gem_va_update_vm(rdev, bo_va);
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
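/**
 * radeon_mode_dumb_create - create a dumb VRAM buffer
 *
 * @file_priv: drm file descriptor
 * @dev: drm_device pointer
 * @args: width/height/bpp in, computed pitch, size and handle out
 */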
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* pitch in bytes: aligned pitch in pixels times bytes per pixel */
	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}