/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

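/**
 * amdgpu_gem_object_create - allocate a GEM object backed by an amdgpu_bo
 *
 * @adev: amdgpu_device pointer
 * @size: requested object size in bytes
 * @alignment: requested alignment in bytes, raised to at least one page
 * @initial_domain: initial placement domain(s) for the buffer
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @kernel: true for kernel-internal allocations
 * @obj: returned GEM object
 *
 * Allocates the backing buffer object and wraps it in a GEM object.
 * For normal domains the size is limited to the unpinned GTT size, and
 * failed VRAM allocations are retried with GTT added to the domain.
 * Returns 0 on success or a negative error code.
 */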
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_unreference_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
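/**
 * amdgpu_gem_create_ioctl - DRM_AMDGPU_GEM_CREATE handler
 *
 * @dev: drm device
 * @data: ioctl arguments (union drm_amdgpu_gem_create)
 * @filp: drm file this request came from
 *
 * Creates a GEM object of the requested size, domain and flags and
 * returns a handle to it. For the GDS, GWS and OA domains the requested
 * size is shifted by the domain-specific shift and the allocation is
 * treated as a kernel allocation.
 */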
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

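/**
 * amdgpu_mode_dumb_mmap - look up the mmap offset for a buffer
 *
 * @filp: drm file this request came from
 * @dev: drm device
 * @handle: GEM handle of the buffer
 * @offset_p: returned mmap offset
 *
 * Looks up the buffer object for @handle and returns the offset that
 * userspace should pass to mmap(). Userptr objects and objects created
 * with AMDGPU_GEM_CREATE_NO_CPU_ACCESS are rejected with -EPERM.
 */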
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

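/**
 * amdgpu_gem_metadata_ioctl - DRM_AMDGPU_GEM_METADATA handler
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_metadata)
 * @filp: drm file this request came from
 *
 * Gets or sets the tiling flags and metadata blob attached to a buffer
 * object, depending on the requested operation.
 */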
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
{
	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

	/* if anything is swapped out don't swap it in here,
	   just abort and wait for the next CS */

	return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: VA operation that was performed (map or unmap)
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry vm_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_print;

	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}
	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
				      NULL);
	if (r)
		goto error_unreserve;

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_print:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

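/**
 * amdgpu_gem_va_ioctl - DRM_AMDGPU_GEM_VA handler
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_va)
 * @filp: drm file this request came from
 *
 * Maps or unmaps a buffer object at the requested GPU virtual address in
 * the calling process' VM and, unless AMDGPU_VM_DELAY_UPDATE was requested
 * or amdgpu_vm_debug is set, applies the page table update right away.
 */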
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	abo = gem_to_amdgpu_bo(gobj);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	tv.bo = &abo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r) {
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}
	ttm_eu_backoff_reservation(&ticket, &list);
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
	    !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

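/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: drm file this request came from
 * @dev: drm device
 * @args: requested width, height and bpp; pitch, size and handle are
 *        filled in on return
 *
 * Computes the pitch and size for the requested dimensions, allocates a
 * CPU-accessible VRAM buffer and returns a handle to it.
 */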
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
		   id, amdgpu_bo_size(bo), placement,
		   amdgpu_bo_gpu_offset(bo));

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}