/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
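
/*
 * For illustration, a minimal user-space sketch of the handle model
 * described above (hypothetical helper, not part of this file; include
 * paths vary with the libdrm setup; error handling omitted for brevity):
 * an object is shared by flinking it to a global name, that name is
 * opened to get a fresh handle, and handles are dropped with the GEM
 * close ioctl.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm.h>
 *
 *	uint32_t reopen_by_name(int fd, uint32_t handle)
 *	{
 *		struct drm_gem_flink flink = { .handle = handle };
 *		struct drm_gem_open open_args = { 0 };
 *		struct drm_gem_close close_args = { .handle = handle };
 *
 *		ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);    // create global name
 *		open_args.name = flink.name;
 *		ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_args); // new handle + size
 *		ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args); // drop old handle
 *		return open_args.handle;
 *	}
 */
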
/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 19)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;

	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
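
/*
 * A minimal sketch (hypothetical driver code, not part of this file) of
 * how a driver typically pairs the allocator above with
 * drm_gem_handle_create() in its "create" ioctl: the new handle keeps
 * the object alive for the caller, so the allocation reference can be
 * dropped before returning.  foo_gem_create is a hypothetical args
 * struct with size and handle fields.
 *
 *	int foo_gem_create_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file_priv)
 *	{
 *		struct foo_gem_create *args = data;	// hypothetical
 *		struct drm_gem_object *obj;
 *		u32 handle;
 *		int ret;
 *
 *		args->size = roundup(args->size, PAGE_SIZE);
 *		obj = drm_gem_object_alloc(dev, args->size);
 *		if (obj == NULL)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_handle_create(file_priv, obj, &handle);
 *		drm_gem_object_unreference_unlocked(obj);
 *		if (ret)
 *			return ret;
 *
 *		args->handle = handle;
 *		return 0;
 *	}
 */
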
/**
 * Removes the mapping from handle to GEM object for this file.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their reference to the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
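
/*
 * A minimal sketch (hypothetical driver code) of the lookup pattern the
 * routines above support: a driver ioctl resolves a handle to an object,
 * works on it, and drops the lookup reference when done.  foo_gem_query
 * is a hypothetical args struct.
 *
 *	int foo_gem_query_ioctl(struct drm_device *dev, void *data,
 *				struct drm_file *file_priv)
 *	{
 *		struct foo_gem_query *args = data;	// hypothetical
 *		struct drm_gem_object *obj;
 *
 *		obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *		if (obj == NULL)
 *			return -ENOENT;
 *
 *		args->size = obj->size;		// use the object
 *
 *		drm_gem_object_unreference_unlocked(obj);
 *		return 0;
 *	}
 */
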
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * idr_for_each() callback: releases one of the file's handle references
 * on an object at device close time.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	fput(obj->filp);
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
}
EXPORT_SYMBOL(drm_gem_object_release);
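
/*
 * A minimal sketch (hypothetical driver code) of how a driver's
 * gem_free_object hook pairs with drm_gem_object_release(): the driver
 * tears down its private state, then calls the helper above to drop the
 * shmfs backing store and the accounting before freeing the object.
 * to_foo_obj() and foo_release_backing_pages() are hypothetical.
 *
 *	void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_obj(obj); // hypothetical
 *
 *		foo_release_backing_pages(foo_obj);	// hypothetical
 *		drm_gem_object_release(obj);
 *		kfree(foo_obj);
 *	}
 */
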
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * Called after the last reference to the object has been lost.
 * Must be called without holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free_unlocked(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_free_object_unlocked != NULL)
		dev->driver->gem_free_object_unlocked(obj);
	else if (dev->driver->gem_free_object != NULL) {
		mutex_lock(&dev->struct_mutex);
		dev->driver->gem_free_object(obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void
drm_gem_object_handle_free(struct kref *kref)
{
	struct drm_gem_object *obj = container_of(kref,
						  struct drm_gem_object,
						  handlecount);
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle
		 * holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
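
/*
 * A minimal sketch (hypothetical driver code) of how the two VM helpers
 * above are wired up: the driver supplies its own fault handler and
 * points gem_vm_ops at a table that uses drm_gem_vm_open/close for the
 * per-mapping reference management.  foo_gem_fault is hypothetical.
 *
 *	static struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,		// hypothetical fault handler
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.gem_vm_ops = &foo_gem_vm_ops,
 *		// ...
 *	};
 */
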
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
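
/*
 * From user space the fake-offset scheme looks like an ordinary mmap()
 * of the DRM fd.  A minimal sketch (hypothetical ioctl name and args
 * struct; real drivers expose their own call, e.g. an i915
 * "mmap_gtt"-style ioctl) that fetches the fake offset for a handle and
 * maps the object through it:
 *
 *	struct foo_gem_mmap_offset arg = { .handle = handle }; // hypothetical
 *	void *ptr;
 *
 *	ioctl(fd, FOO_IOCTL_GEM_MMAP_OFFSET, &arg);	// hypothetical
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */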