1 /* exynos_drm_gem.c 2 * 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 * Author: Inki Dae <inki.dae@samsung.com> 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the next 14 * paragraph) shall be included in all copies or substantial portions of the 15 * Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 23 * OTHER DEALINGS IN THE SOFTWARE. 24 */ 25 26 #include <drm/drmP.h> 27 28 #include <linux/shmem_fs.h> 29 #include <drm/exynos_drm.h> 30 31 #include "exynos_drm_drv.h" 32 #include "exynos_drm_gem.h" 33 #include "exynos_drm_buf.h" 34 35 static unsigned int convert_to_vm_err_msg(int msg) 36 { 37 unsigned int out_msg; 38 39 switch (msg) { 40 case 0: 41 case -ERESTARTSYS: 42 case -EINTR: 43 out_msg = VM_FAULT_NOPAGE; 44 break; 45 46 case -ENOMEM: 47 out_msg = VM_FAULT_OOM; 48 break; 49 50 default: 51 out_msg = VM_FAULT_SIGBUS; 52 break; 53 } 54 55 return out_msg; 56 } 57 58 static int check_gem_flags(unsigned int flags) 59 { 60 if (flags & ~(EXYNOS_BO_MASK)) { 61 DRM_ERROR("invalid flags.\n"); 62 return -EINVAL; 63 } 64 65 return 0; 66 } 67 68 static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj, 69 struct vm_area_struct *vma) 70 { 71 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags); 72 73 /* non-cachable as default. */ 74 if (obj->flags & EXYNOS_BO_CACHABLE) 75 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 76 else if (obj->flags & EXYNOS_BO_WC) 77 vma->vm_page_prot = 78 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 79 else 80 vma->vm_page_prot = 81 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); 82 } 83 84 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) 85 { 86 /* TODO */ 87 88 return roundup(size, PAGE_SIZE); 89 } 90 91 static int exynos_drm_gem_map_buf(struct drm_gem_object *obj, 92 struct vm_area_struct *vma, 93 unsigned long f_vaddr, 94 pgoff_t page_offset) 95 { 96 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 97 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; 98 struct scatterlist *sgl; 99 unsigned long pfn; 100 int i; 101 102 if (!buf->sgt) 103 return -EINTR; 104 105 if (page_offset >= (buf->size >> PAGE_SHIFT)) { 106 DRM_ERROR("invalid page offset\n"); 107 return -EINVAL; 108 } 109 110 sgl = buf->sgt->sgl; 111 for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) { 112 if (page_offset < (sgl->length >> PAGE_SHIFT)) 113 break; 114 page_offset -= (sgl->length >> PAGE_SHIFT); 115 } 116 117 pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset; 118 119 return vm_insert_mixed(vma, f_vaddr, pfn); 120 } 121 122 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 123 struct drm_file *file_priv, 124 unsigned int *handle) 125 { 126 int ret; 127 128 /* 129 * allocate a id of idr table where the obj is registered 130 * and handle has the id what user can see. 131 */ 132 ret = drm_gem_handle_create(file_priv, obj, handle); 133 if (ret) 134 return ret; 135 136 DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); 137 138 /* drop reference from allocate - handle holds it now. */ 139 drm_gem_object_unreference_unlocked(obj); 140 141 return 0; 142 } 143 144 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) 145 { 146 struct drm_gem_object *obj; 147 struct exynos_drm_gem_buf *buf; 148 149 DRM_DEBUG_KMS("%s\n", __FILE__); 150 151 obj = &exynos_gem_obj->base; 152 buf = exynos_gem_obj->buffer; 153 154 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 155 156 /* 157 * do not release memory region from exporter. 158 * 159 * the region will be released by exporter 160 * once dmabuf's refcount becomes 0. 161 */ 162 if (obj->import_attach) 163 goto out; 164 165 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); 166 167 out: 168 exynos_drm_fini_buf(obj->dev, buf); 169 exynos_gem_obj->buffer = NULL; 170 171 if (obj->map_list.map) 172 drm_gem_free_mmap_offset(obj); 173 174 /* release file pointer to gem object. */ 175 drm_gem_object_release(obj); 176 177 kfree(exynos_gem_obj); 178 exynos_gem_obj = NULL; 179 } 180 181 struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, 182 unsigned long size) 183 { 184 struct exynos_drm_gem_obj *exynos_gem_obj; 185 struct drm_gem_object *obj; 186 int ret; 187 188 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 189 if (!exynos_gem_obj) { 190 DRM_ERROR("failed to allocate exynos gem object\n"); 191 return NULL; 192 } 193 194 exynos_gem_obj->size = size; 195 obj = &exynos_gem_obj->base; 196 197 ret = drm_gem_object_init(dev, obj, size); 198 if (ret < 0) { 199 DRM_ERROR("failed to initialize gem object\n"); 200 kfree(exynos_gem_obj); 201 return NULL; 202 } 203 204 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 205 206 return exynos_gem_obj; 207 } 208 209 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, 210 unsigned int flags, 211 unsigned long size) 212 { 213 struct exynos_drm_gem_obj *exynos_gem_obj; 214 struct exynos_drm_gem_buf *buf; 215 int ret; 216 217 if (!size) { 218 DRM_ERROR("invalid size.\n"); 219 return ERR_PTR(-EINVAL); 220 } 221 222 size = roundup_gem_size(size, flags); 223 DRM_DEBUG_KMS("%s\n", __FILE__); 224 225 ret = check_gem_flags(flags); 226 if (ret) 227 return ERR_PTR(ret); 228 229 buf = exynos_drm_init_buf(dev, size); 230 if (!buf) 231 return ERR_PTR(-ENOMEM); 232 233 exynos_gem_obj = exynos_drm_gem_init(dev, size); 234 if (!exynos_gem_obj) { 235 ret = -ENOMEM; 236 goto err_fini_buf; 237 } 238 239 exynos_gem_obj->buffer = buf; 240 241 /* set memory type and cache attribute from user side. */ 242 exynos_gem_obj->flags = flags; 243 244 ret = exynos_drm_alloc_buf(dev, buf, flags); 245 if (ret < 0) { 246 drm_gem_object_release(&exynos_gem_obj->base); 247 goto err_fini_buf; 248 } 249 250 return exynos_gem_obj; 251 252 err_fini_buf: 253 exynos_drm_fini_buf(dev, buf); 254 return ERR_PTR(ret); 255 } 256 257 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 258 struct drm_file *file_priv) 259 { 260 struct drm_exynos_gem_create *args = data; 261 struct exynos_drm_gem_obj *exynos_gem_obj; 262 int ret; 263 264 DRM_DEBUG_KMS("%s\n", __FILE__); 265 266 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); 267 if (IS_ERR(exynos_gem_obj)) 268 return PTR_ERR(exynos_gem_obj); 269 270 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv, 271 &args->handle); 272 if (ret) { 273 exynos_drm_gem_destroy(exynos_gem_obj); 274 return ret; 275 } 276 277 return 0; 278 } 279 280 dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 281 unsigned int gem_handle, 282 struct drm_file *filp) 283 { 284 struct exynos_drm_gem_obj *exynos_gem_obj; 285 struct drm_gem_object *obj; 286 287 obj = drm_gem_object_lookup(dev, filp, gem_handle); 288 if (!obj) { 289 DRM_ERROR("failed to lookup gem object.\n"); 290 return ERR_PTR(-EINVAL); 291 } 292 293 exynos_gem_obj = to_exynos_gem_obj(obj); 294 295 return &exynos_gem_obj->buffer->dma_addr; 296 } 297 298 void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 299 unsigned int gem_handle, 300 struct drm_file *filp) 301 { 302 struct exynos_drm_gem_obj *exynos_gem_obj; 303 struct drm_gem_object *obj; 304 305 obj = drm_gem_object_lookup(dev, filp, gem_handle); 306 if (!obj) { 307 DRM_ERROR("failed to lookup gem object.\n"); 308 return; 309 } 310 311 exynos_gem_obj = to_exynos_gem_obj(obj); 312 313 drm_gem_object_unreference_unlocked(obj); 314 315 /* 316 * decrease obj->refcount one more time because we has already 317 * increased it at exynos_drm_gem_get_dma_addr(). 318 */ 319 drm_gem_object_unreference_unlocked(obj); 320 } 321 322 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, 323 struct drm_file *file_priv) 324 { 325 struct drm_exynos_gem_map_off *args = data; 326 327 DRM_DEBUG_KMS("%s\n", __FILE__); 328 329 DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n", 330 args->handle, (unsigned long)args->offset); 331 332 if (!(dev->driver->driver_features & DRIVER_GEM)) { 333 DRM_ERROR("does not support GEM.\n"); 334 return -ENODEV; 335 } 336 337 return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle, 338 &args->offset); 339 } 340 341 static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev, 342 struct file *filp) 343 { 344 struct drm_file *file_priv; 345 346 mutex_lock(&drm_dev->struct_mutex); 347 348 /* find current process's drm_file from filelist. */ 349 list_for_each_entry(file_priv, &drm_dev->filelist, lhead) { 350 if (file_priv->filp == filp) { 351 mutex_unlock(&drm_dev->struct_mutex); 352 return file_priv; 353 } 354 } 355 356 mutex_unlock(&drm_dev->struct_mutex); 357 WARN_ON(1); 358 359 return ERR_PTR(-EFAULT); 360 } 361 362 static int exynos_drm_gem_mmap_buffer(struct file *filp, 363 struct vm_area_struct *vma) 364 { 365 struct drm_gem_object *obj = filp->private_data; 366 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 367 struct drm_device *drm_dev = obj->dev; 368 struct exynos_drm_gem_buf *buffer; 369 struct drm_file *file_priv; 370 unsigned long vm_size; 371 int ret; 372 373 DRM_DEBUG_KMS("%s\n", __FILE__); 374 375 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 376 vma->vm_private_data = obj; 377 vma->vm_ops = drm_dev->driver->gem_vm_ops; 378 379 /* restore it to driver's fops. */ 380 filp->f_op = fops_get(drm_dev->driver->fops); 381 382 file_priv = exynos_drm_find_drm_file(drm_dev, filp); 383 if (IS_ERR(file_priv)) 384 return PTR_ERR(file_priv); 385 386 /* restore it to drm_file. */ 387 filp->private_data = file_priv; 388 389 update_vm_cache_attr(exynos_gem_obj, vma); 390 391 vm_size = vma->vm_end - vma->vm_start; 392 393 /* 394 * a buffer contains information to physically continuous memory 395 * allocated by user request or at framebuffer creation. 396 */ 397 buffer = exynos_gem_obj->buffer; 398 399 /* check if user-requested size is valid. */ 400 if (vm_size > buffer->size) 401 return -EINVAL; 402 403 ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages, 404 buffer->dma_addr, buffer->size, 405 &buffer->dma_attrs); 406 if (ret < 0) { 407 DRM_ERROR("failed to mmap.\n"); 408 return ret; 409 } 410 411 /* 412 * take a reference to this mapping of the object. And this reference 413 * is unreferenced by the corresponding vm_close call. 414 */ 415 drm_gem_object_reference(obj); 416 417 mutex_lock(&drm_dev->struct_mutex); 418 drm_vm_open_locked(drm_dev, vma); 419 mutex_unlock(&drm_dev->struct_mutex); 420 421 return 0; 422 } 423 424 static const struct file_operations exynos_drm_gem_fops = { 425 .mmap = exynos_drm_gem_mmap_buffer, 426 }; 427 428 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, 429 struct drm_file *file_priv) 430 { 431 struct drm_exynos_gem_mmap *args = data; 432 struct drm_gem_object *obj; 433 unsigned int addr; 434 435 DRM_DEBUG_KMS("%s\n", __FILE__); 436 437 if (!(dev->driver->driver_features & DRIVER_GEM)) { 438 DRM_ERROR("does not support GEM.\n"); 439 return -ENODEV; 440 } 441 442 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 443 if (!obj) { 444 DRM_ERROR("failed to lookup gem object.\n"); 445 return -EINVAL; 446 } 447 448 /* 449 * Set specific mmper's fops. And it will be restored by 450 * exynos_drm_gem_mmap_buffer to dev->driver->fops. 451 * This is used to call specific mapper temporarily. 452 */ 453 file_priv->filp->f_op = &exynos_drm_gem_fops; 454 455 /* 456 * Set gem object to private_data so that specific mmaper 457 * can get the gem object. And it will be restored by 458 * exynos_drm_gem_mmap_buffer to drm_file. 459 */ 460 file_priv->filp->private_data = obj; 461 462 addr = vm_mmap(file_priv->filp, 0, args->size, 463 PROT_READ | PROT_WRITE, MAP_SHARED, 0); 464 465 drm_gem_object_unreference_unlocked(obj); 466 467 if (IS_ERR((void *)addr)) { 468 file_priv->filp->private_data = file_priv; 469 return PTR_ERR((void *)addr); 470 } 471 472 args->mapped = addr; 473 474 DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped); 475 476 return 0; 477 } 478 479 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 480 struct drm_file *file_priv) 481 { struct exynos_drm_gem_obj *exynos_gem_obj; 482 struct drm_exynos_gem_info *args = data; 483 struct drm_gem_object *obj; 484 485 mutex_lock(&dev->struct_mutex); 486 487 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 488 if (!obj) { 489 DRM_ERROR("failed to lookup gem object.\n"); 490 mutex_unlock(&dev->struct_mutex); 491 return -EINVAL; 492 } 493 494 exynos_gem_obj = to_exynos_gem_obj(obj); 495 496 args->flags = exynos_gem_obj->flags; 497 args->size = exynos_gem_obj->size; 498 499 drm_gem_object_unreference(obj); 500 mutex_unlock(&dev->struct_mutex); 501 502 return 0; 503 } 504 505 struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma) 506 { 507 struct vm_area_struct *vma_copy; 508 509 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL); 510 if (!vma_copy) 511 return NULL; 512 513 if (vma->vm_ops && vma->vm_ops->open) 514 vma->vm_ops->open(vma); 515 516 if (vma->vm_file) 517 get_file(vma->vm_file); 518 519 memcpy(vma_copy, vma, sizeof(*vma)); 520 521 vma_copy->vm_mm = NULL; 522 vma_copy->vm_next = NULL; 523 vma_copy->vm_prev = NULL; 524 525 return vma_copy; 526 } 527 528 void exynos_gem_put_vma(struct vm_area_struct *vma) 529 { 530 if (!vma) 531 return; 532 533 if (vma->vm_ops && vma->vm_ops->close) 534 vma->vm_ops->close(vma); 535 536 if (vma->vm_file) 537 fput(vma->vm_file); 538 539 kfree(vma); 540 } 541 542 int exynos_gem_get_pages_from_userptr(unsigned long start, 543 unsigned int npages, 544 struct page **pages, 545 struct vm_area_struct *vma) 546 { 547 int get_npages; 548 549 /* the memory region mmaped with VM_PFNMAP. */ 550 if (vma_is_io(vma)) { 551 unsigned int i; 552 553 for (i = 0; i < npages; ++i, start += PAGE_SIZE) { 554 unsigned long pfn; 555 int ret = follow_pfn(vma, start, &pfn); 556 if (ret) 557 return ret; 558 559 pages[i] = pfn_to_page(pfn); 560 } 561 562 if (i != npages) { 563 DRM_ERROR("failed to get user_pages.\n"); 564 return -EINVAL; 565 } 566 567 return 0; 568 } 569 570 get_npages = get_user_pages(current, current->mm, start, 571 npages, 1, 1, pages, NULL); 572 get_npages = max(get_npages, 0); 573 if (get_npages != npages) { 574 DRM_ERROR("failed to get user_pages.\n"); 575 while (get_npages) 576 put_page(pages[--get_npages]); 577 return -EFAULT; 578 } 579 580 return 0; 581 } 582 583 void exynos_gem_put_pages_to_userptr(struct page **pages, 584 unsigned int npages, 585 struct vm_area_struct *vma) 586 { 587 if (!vma_is_io(vma)) { 588 unsigned int i; 589 590 for (i = 0; i < npages; i++) { 591 set_page_dirty_lock(pages[i]); 592 593 /* 594 * undo the reference we took when populating 595 * the table. 596 */ 597 put_page(pages[i]); 598 } 599 } 600 } 601 602 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, 603 struct sg_table *sgt, 604 enum dma_data_direction dir) 605 { 606 int nents; 607 608 mutex_lock(&drm_dev->struct_mutex); 609 610 nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); 611 if (!nents) { 612 DRM_ERROR("failed to map sgl with dma.\n"); 613 mutex_unlock(&drm_dev->struct_mutex); 614 return nents; 615 } 616 617 mutex_unlock(&drm_dev->struct_mutex); 618 return 0; 619 } 620 621 void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, 622 struct sg_table *sgt, 623 enum dma_data_direction dir) 624 { 625 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); 626 } 627 628 int exynos_drm_gem_init_object(struct drm_gem_object *obj) 629 { 630 DRM_DEBUG_KMS("%s\n", __FILE__); 631 632 return 0; 633 } 634 635 void exynos_drm_gem_free_object(struct drm_gem_object *obj) 636 { 637 struct exynos_drm_gem_obj *exynos_gem_obj; 638 struct exynos_drm_gem_buf *buf; 639 640 DRM_DEBUG_KMS("%s\n", __FILE__); 641 642 exynos_gem_obj = to_exynos_gem_obj(obj); 643 buf = exynos_gem_obj->buffer; 644 645 if (obj->import_attach) 646 drm_prime_gem_destroy(obj, buf->sgt); 647 648 exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); 649 } 650 651 int exynos_drm_gem_dumb_create(struct drm_file *file_priv, 652 struct drm_device *dev, 653 struct drm_mode_create_dumb *args) 654 { 655 struct exynos_drm_gem_obj *exynos_gem_obj; 656 int ret; 657 658 DRM_DEBUG_KMS("%s\n", __FILE__); 659 660 /* 661 * alocate memory to be used for framebuffer. 662 * - this callback would be called by user application 663 * with DRM_IOCTL_MODE_CREATE_DUMB command. 664 */ 665 666 args->pitch = args->width * ((args->bpp + 7) / 8); 667 args->size = args->pitch * args->height; 668 669 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); 670 if (IS_ERR(exynos_gem_obj)) 671 return PTR_ERR(exynos_gem_obj); 672 673 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv, 674 &args->handle); 675 if (ret) { 676 exynos_drm_gem_destroy(exynos_gem_obj); 677 return ret; 678 } 679 680 return 0; 681 } 682 683 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, 684 struct drm_device *dev, uint32_t handle, 685 uint64_t *offset) 686 { 687 struct drm_gem_object *obj; 688 int ret = 0; 689 690 DRM_DEBUG_KMS("%s\n", __FILE__); 691 692 mutex_lock(&dev->struct_mutex); 693 694 /* 695 * get offset of memory allocated for drm framebuffer. 696 * - this callback would be called by user application 697 * with DRM_IOCTL_MODE_MAP_DUMB command. 698 */ 699 700 obj = drm_gem_object_lookup(dev, file_priv, handle); 701 if (!obj) { 702 DRM_ERROR("failed to lookup gem object.\n"); 703 ret = -EINVAL; 704 goto unlock; 705 } 706 707 if (!obj->map_list.map) { 708 ret = drm_gem_create_mmap_offset(obj); 709 if (ret) 710 goto out; 711 } 712 713 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; 714 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 715 716 out: 717 drm_gem_object_unreference(obj); 718 unlock: 719 mutex_unlock(&dev->struct_mutex); 720 return ret; 721 } 722 723 int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, 724 struct drm_device *dev, 725 unsigned int handle) 726 { 727 int ret; 728 729 DRM_DEBUG_KMS("%s\n", __FILE__); 730 731 /* 732 * obj->refcount and obj->handle_count are decreased and 733 * if both them are 0 then exynos_drm_gem_free_object() 734 * would be called by callback to release resources. 735 */ 736 ret = drm_gem_handle_delete(file_priv, handle); 737 if (ret < 0) { 738 DRM_ERROR("failed to delete drm_gem_handle.\n"); 739 return ret; 740 } 741 742 return 0; 743 } 744 745 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 746 { 747 struct drm_gem_object *obj = vma->vm_private_data; 748 struct drm_device *dev = obj->dev; 749 unsigned long f_vaddr; 750 pgoff_t page_offset; 751 int ret; 752 753 page_offset = ((unsigned long)vmf->virtual_address - 754 vma->vm_start) >> PAGE_SHIFT; 755 f_vaddr = (unsigned long)vmf->virtual_address; 756 757 mutex_lock(&dev->struct_mutex); 758 759 ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset); 760 if (ret < 0) 761 DRM_ERROR("failed to map a buffer with user.\n"); 762 763 mutex_unlock(&dev->struct_mutex); 764 765 return convert_to_vm_err_msg(ret); 766 } 767 768 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 769 { 770 struct exynos_drm_gem_obj *exynos_gem_obj; 771 struct drm_gem_object *obj; 772 int ret; 773 774 DRM_DEBUG_KMS("%s\n", __FILE__); 775 776 /* set vm_area_struct. */ 777 ret = drm_gem_mmap(filp, vma); 778 if (ret < 0) { 779 DRM_ERROR("failed to mmap.\n"); 780 return ret; 781 } 782 783 obj = vma->vm_private_data; 784 exynos_gem_obj = to_exynos_gem_obj(obj); 785 786 ret = check_gem_flags(exynos_gem_obj->flags); 787 if (ret) { 788 drm_gem_vm_close(vma); 789 drm_gem_free_mmap_offset(obj); 790 return ret; 791 } 792 793 vma->vm_flags &= ~VM_PFNMAP; 794 vma->vm_flags |= VM_MIXEDMAP; 795 796 update_vm_cache_attr(exynos_gem_obj, vma); 797 798 return ret; 799 } 800