/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

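/*
 * radeon_gem_object_create - allocate a radeon BO and wrap it in a GEM object.
 *
 * Summary of the behavior below: the alignment is rounded up to the page
 * size, requests larger than the unpinned GTT size are rejected, and a
 * failed VRAM-only allocation is retried with GTT added to the allowed
 * domains.
 */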
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	/* -EDEADLK means a GPU lockup was detected; try a reset and, if it
	 * succeeds, ask userspace to retry by returning -EAGAIN. */
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
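
/*
 * radeon_gem_info_ioctl - report memory pool sizes to userspace.
 *
 * Fills drm_radeon_gem_info with the real VRAM size and the VRAM and
 * GTT space that is not currently pinned, i.e. still available for
 * new allocations.
 */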
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
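
/*
 * A typical userspace flow for the create and mmap ioctls above, as a
 * hedged sketch rather than part of the driver: the UAPI field names are
 * the ones the handlers above dereference, the ioctl numbers come from
 * radeon_drm.h, drmIoctl() is libdrm's ioctl wrapper, and error handling
 * is omitted:
 *
 *	struct drm_radeon_gem_create create = {
 *		.size = 1024 * 1024,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &create);
 *
 *	struct drm_radeon_gem_mmap map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_RADEON_GEM_MMAP, &map);
 *	// addr_ptr now holds the fake offset from radeon_bo_mmap_offset()
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.addr_ptr);
 */
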
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, false);
	/* Flush HDP cache via MMIO if necessary */
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

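/*
 * radeon_gem_va_ioctl - map or unmap a GEM object in the per-file GPU VM.
 *
 * Validates the request (vm_id must be zero, the offset must lie outside
 * the reserved VA range, and the VALID/SYSTEM page flags are kernel-managed
 * and therefore rejected), then updates the bo_va for the object.
 */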
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way we can start using this field later without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to force userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be
	 * able to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

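/*
 * radeon_mode_dumb_create - allocate a dumb scanout buffer.
 *
 * Computes an aligned pitch and page-aligned size from the requested
 * width/height/bpp, allocates the BO in VRAM, and returns a GEM handle.
 */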
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}