/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
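/*
 * To illustrate the intended usage (a sketch only, mirroring what a
 * driver such as i915 does in its "create" ioctl, not part of this
 * file's API): the driver pairs drm_gem_object_alloc() with
 * drm_gem_handle_create(), then drops its allocation-time counts so
 * that the new handle is the only thing keeping the object alive:
 *
 *	args->size = roundup(args->size, PAGE_SIZE);
 *	obj = drm_gem_object_alloc(dev, args->size);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_handle_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */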
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)

/**
 * Initialize the GEM device fields
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	if (drm_ht_create(&mm->offset_hash, 19)) {
		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		/* Tear down in reverse order of setup: remove the hash
		 * table before freeing the structure that embeds it.
		 */
		drm_ht_remove(&mm->offset_hash);
		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
		return -ENOMEM;
	}

	/* Publish mm_private only once setup has fully succeeded. */
	dev->mm_private = mm;

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
	dev->mm_private = NULL;
}

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp)) {
		kfree(obj);
		return NULL;
	}

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		fput(obj->filp);
		kfree(obj);
		return NULL;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
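/*
 * A sketch of how a driver hands out one of the fake mmap offsets
 * reserved in drm_gem_init() above (the surrounding bookkeeping, e.g.
 * the drm_map_list "list" embedded in the driver's object, is the
 * driver's business and only assumed here):
 *
 *	struct drm_gem_mm *mm = dev->mm_private;
 *	struct drm_mm_node *node;
 *
 *	node = drm_mm_search_free(&mm->offset_manager,
 *				  obj->size / PAGE_SIZE, 0, 0);
 *	node = drm_mm_get_block(node, obj->size / PAGE_SIZE, 0);
 *	list->hash.key = node->start;
 *	drm_ht_insert_item(&mm->offset_hash, &list->hash);
 *
 * The offset reported to userspace is node->start << PAGE_SHIFT;
 * drm_gem_mmap() below finds the object again by looking up
 * vma->vm_pgoff in offset_hash.
 */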
/**
 * Removes the mapping from handle to filp for this object.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, int handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross.  The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Remove the handle's mapping, then drop its reference. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Create a handle for this object.  This adds a handle reference
 * to the object, which includes a regular reference count.  Callers
 * will likely want to drop their own reference to the object
 * afterwards, since the handle now holds one.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      int *handlep)
{
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      int handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	return drm_gem_handle_delete(file_priv, args->handle);
}
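/*
 * The usual pattern for a driver ioctl that operates on an object by
 * handle (the same pattern the flink and open ioctls below follow):
 * look the object up, which takes a reference, and drop that reference
 * under struct_mutex when done:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -EBADF;
 *	... operate on obj ...
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 */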
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name table takes over the reference acquired by the
 * lookup below, and that reference is dropped again when the last
 * handle to the object is closed, so a name on its own does not keep
 * the object alive.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		/* The name table already holds its own reference, so
		 * drop the one we took in the lookup above.
		 */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
		goto err;
	}
	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
				&obj->name);
	spin_unlock(&dev->object_name_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		goto err;

	/*
	 * Leave the reference from the lookup around as the
	 * name table now holds one
	 */
	args->name = (uint64_t) obj->name;

	return 0;

err:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	int handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	mutex_lock(&dev->struct_mutex);
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_destroy(&file_private->object_idr);
	mutex_unlock(&dev->struct_mutex);
}
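/*
 * From userspace, the flink and open ioctls above combine to share an
 * object between processes (illustrative only; the ioctl numbers and
 * argument structs come from drm.h):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	... hand flink.name to the other process ...
 *
 *	struct drm_gem_open op = { .name = name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &op);
 *	... op.handle and op.size are now valid in this process ...
 */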
/**
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	fput(obj->filp);
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object.  Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
void
drm_gem_object_handle_free(struct kref *kref)
{
	struct drm_gem_object *obj = container_of(kref,
						  struct drm_gem_object,
						  handlecount);
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 */
		drm_gem_object_unreference(obj);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
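/*
 * For reference, the inline helper in drmP.h that invokes the two
 * destructors above looks roughly like this (a sketch, reproduced here
 * because the ordering matters: the handle count must be dropped first
 * so that drm_gem_object_handle_free() can remove any name while the
 * object is still alive):
 *
 *	static inline void
 *	drm_gem_object_handle_unreference(struct drm_gem_object *obj)
 *	{
 *		if (obj == NULL)
 *			return;
 *		kref_put(&obj->handlecount, drm_gem_object_handle_free);
 *		drm_gem_object_unreference(obj);
 *	}
 */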
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	unsigned long prot;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	/* FIXME: use pgprot_writecombine when available */
	prot = pgprot_val(vma->vm_page_prot);
#ifdef CONFIG_X86
	prot |= _PAGE_CACHE_WC;
#endif
	vma->vm_page_prot = __pgprot(prot);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
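/*
 * Tying the above together from the userspace side (a sketch using
 * i915's GTT-mapping ioctl as the example; other drivers name theirs
 * differently): the driver ioctl hands back the fake offset, which is
 * then passed to mmap() on the DRM fd so the request lands in
 * drm_gem_mmap() above:
 *
 *	struct drm_i915_gem_mmap_gtt map = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */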