1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 */ 27 28 #include <drm/drmP.h> 29 #include <drm/drm_vma_manager.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "i915_vgpu.h" 33 #include "i915_trace.h" 34 #include "intel_drv.h" 35 #include "intel_frontbuffer.h" 36 #include "intel_mocs.h" 37 #include <linux/dma-fence-array.h> 38 #include <linux/reservation.h> 39 #include <linux/shmem_fs.h> 40 #include <linux/slab.h> 41 #include <linux/swap.h> 42 #include <linux/pci.h> 43 #include <linux/dma-buf.h> 44 45 static void i915_gem_flush_free_objects(struct drm_i915_private *i915); 46 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 47 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 48 49 static bool cpu_cache_is_coherent(struct drm_device *dev, 50 enum i915_cache_level level) 51 { 52 return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE; 53 } 54 55 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) 56 { 57 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 58 return false; 59 60 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) 61 return true; 62 63 return obj->pin_display; 64 } 65 66 static int 67 insert_mappable_node(struct i915_ggtt *ggtt, 68 struct drm_mm_node *node, u32 size) 69 { 70 memset(node, 0, sizeof(*node)); 71 return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node, 72 size, 0, -1, 73 0, ggtt->mappable_end, 74 DRM_MM_SEARCH_DEFAULT, 75 DRM_MM_CREATE_DEFAULT); 76 } 77 78 static void 79 remove_mappable_node(struct drm_mm_node *node) 80 { 81 drm_mm_remove_node(node); 82 } 83 84 /* some bookkeeping */ 85 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 86 u64 size) 87 { 88 spin_lock(&dev_priv->mm.object_stat_lock); 89 dev_priv->mm.object_count++; 90 dev_priv->mm.object_memory += size; 91 spin_unlock(&dev_priv->mm.object_stat_lock); 92 } 93 94 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 95 u64 size) 96 { 97 spin_lock(&dev_priv->mm.object_stat_lock); 98 dev_priv->mm.object_count--; 99 dev_priv->mm.object_memory -= size; 100 spin_unlock(&dev_priv->mm.object_stat_lock); 101 } 102 103 static int 104 i915_gem_wait_for_error(struct i915_gpu_error *error) 105 { 106 int ret; 107 108 might_sleep(); 109 110 if (!i915_reset_in_progress(error)) 
111 return 0; 112 113 /* 114 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 115 * userspace. If it takes that long something really bad is going on and 116 * we should simply try to bail out and fail as gracefully as possible. 117 */ 118 ret = wait_event_interruptible_timeout(error->reset_queue, 119 !i915_reset_in_progress(error), 120 I915_RESET_TIMEOUT); 121 if (ret == 0) { 122 DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); 123 return -EIO; 124 } else if (ret < 0) { 125 return ret; 126 } else { 127 return 0; 128 } 129 } 130 131 int i915_mutex_lock_interruptible(struct drm_device *dev) 132 { 133 struct drm_i915_private *dev_priv = to_i915(dev); 134 int ret; 135 136 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 137 if (ret) 138 return ret; 139 140 ret = mutex_lock_interruptible(&dev->struct_mutex); 141 if (ret) 142 return ret; 143 144 return 0; 145 } 146 147 int 148 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 149 struct drm_file *file) 150 { 151 struct drm_i915_private *dev_priv = to_i915(dev); 152 struct i915_ggtt *ggtt = &dev_priv->ggtt; 153 struct drm_i915_gem_get_aperture *args = data; 154 struct i915_vma *vma; 155 size_t pinned; 156 157 pinned = 0; 158 mutex_lock(&dev->struct_mutex); 159 list_for_each_entry(vma, &ggtt->base.active_list, vm_link) 160 if (i915_vma_is_pinned(vma)) 161 pinned += vma->node.size; 162 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) 163 if (i915_vma_is_pinned(vma)) 164 pinned += vma->node.size; 165 mutex_unlock(&dev->struct_mutex); 166 167 args->aper_size = ggtt->base.total; 168 args->aper_available_size = args->aper_size - pinned; 169 170 return 0; 171 } 172 173 static struct sg_table * 174 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) 175 { 176 struct address_space *mapping = obj->base.filp->f_mapping; 177 char *vaddr = obj->phys_handle->vaddr; 178 struct sg_table *st; 179 struct scatterlist *sg; 180 int i; 181 182 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) 183 return ERR_PTR(-EINVAL); 184 185 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 186 struct page *page; 187 char *src; 188 189 page = shmem_read_mapping_page(mapping, i); 190 if (IS_ERR(page)) 191 return ERR_CAST(page); 192 193 src = kmap_atomic(page); 194 memcpy(vaddr, src, PAGE_SIZE); 195 drm_clflush_virt_range(vaddr, PAGE_SIZE); 196 kunmap_atomic(src); 197 198 put_page(page); 199 vaddr += PAGE_SIZE; 200 } 201 202 i915_gem_chipset_flush(to_i915(obj->base.dev)); 203 204 st = kmalloc(sizeof(*st), GFP_KERNEL); 205 if (st == NULL) 206 return ERR_PTR(-ENOMEM); 207 208 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 209 kfree(st); 210 return ERR_PTR(-ENOMEM); 211 } 212 213 sg = st->sgl; 214 sg->offset = 0; 215 sg->length = obj->base.size; 216 217 sg_dma_address(sg) = obj->phys_handle->busaddr; 218 sg_dma_len(sg) = obj->base.size; 219 220 return st; 221 } 222 223 static void 224 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, 225 struct sg_table *pages) 226 { 227 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); 228 229 if (obj->mm.madv == I915_MADV_DONTNEED) 230 obj->mm.dirty = false; 231 232 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 233 !cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) 234 drm_clflush_sg(pages); 235 236 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 237 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 238 } 239 240 static void 241 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, 242 struct sg_table *pages) 243 { 244 
__i915_gem_object_release_shmem(obj, pages); 245 246 if (obj->mm.dirty) { 247 struct address_space *mapping = obj->base.filp->f_mapping; 248 char *vaddr = obj->phys_handle->vaddr; 249 int i; 250 251 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 252 struct page *page; 253 char *dst; 254 255 page = shmem_read_mapping_page(mapping, i); 256 if (IS_ERR(page)) 257 continue; 258 259 dst = kmap_atomic(page); 260 drm_clflush_virt_range(vaddr, PAGE_SIZE); 261 memcpy(dst, vaddr, PAGE_SIZE); 262 kunmap_atomic(dst); 263 264 set_page_dirty(page); 265 if (obj->mm.madv == I915_MADV_WILLNEED) 266 mark_page_accessed(page); 267 put_page(page); 268 vaddr += PAGE_SIZE; 269 } 270 obj->mm.dirty = false; 271 } 272 273 sg_free_table(pages); 274 kfree(pages); 275 } 276 277 static void 278 i915_gem_object_release_phys(struct drm_i915_gem_object *obj) 279 { 280 drm_pci_free(obj->base.dev, obj->phys_handle); 281 i915_gem_object_unpin_pages(obj); 282 } 283 284 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { 285 .get_pages = i915_gem_object_get_pages_phys, 286 .put_pages = i915_gem_object_put_pages_phys, 287 .release = i915_gem_object_release_phys, 288 }; 289 290 int i915_gem_object_unbind(struct drm_i915_gem_object *obj) 291 { 292 struct i915_vma *vma; 293 LIST_HEAD(still_in_list); 294 int ret; 295 296 lockdep_assert_held(&obj->base.dev->struct_mutex); 297 298 /* Closed vma are removed from the obj->vma_list - but they may 299 * still have an active binding on the object. To remove those we 300 * must wait for all rendering to complete to the object (as unbinding 301 * must anyway), and retire the requests. 302 */ 303 ret = i915_gem_object_wait(obj, 304 I915_WAIT_INTERRUPTIBLE | 305 I915_WAIT_LOCKED | 306 I915_WAIT_ALL, 307 MAX_SCHEDULE_TIMEOUT, 308 NULL); 309 if (ret) 310 return ret; 311 312 i915_gem_retire_requests(to_i915(obj->base.dev)); 313 314 while ((vma = list_first_entry_or_null(&obj->vma_list, 315 struct i915_vma, 316 obj_link))) { 317 list_move_tail(&vma->obj_link, &still_in_list); 318 ret = i915_vma_unbind(vma); 319 if (ret) 320 break; 321 } 322 list_splice(&still_in_list, &obj->vma_list); 323 324 return ret; 325 } 326 327 static long 328 i915_gem_object_wait_fence(struct dma_fence *fence, 329 unsigned int flags, 330 long timeout, 331 struct intel_rps_client *rps) 332 { 333 struct drm_i915_gem_request *rq; 334 335 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); 336 337 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 338 return timeout; 339 340 if (!dma_fence_is_i915(fence)) 341 return dma_fence_wait_timeout(fence, 342 flags & I915_WAIT_INTERRUPTIBLE, 343 timeout); 344 345 rq = to_request(fence); 346 if (i915_gem_request_completed(rq)) 347 goto out; 348 349 /* This client is about to stall waiting for the GPU. In many cases 350 * this is undesirable and limits the throughput of the system, as 351 * many clients cannot continue processing user input/output whilst 352 * blocked. RPS autotuning may take tens of milliseconds to respond 353 * to the GPU load and thus incurs additional latency for the client. 354 * We can circumvent that by promoting the GPU frequency to maximum 355 * before we wait. This makes the GPU throttle up much more quickly 356 * (good for benchmarks and user experience, e.g. window animations), 357 * but at a cost of spending more power processing the workload 358 * (bad for battery). Not all clients even want their results 359 * immediately and for them we should just let the GPU select its own 360 * frequency to maximise efficiency. 
To prevent a single client from 361 * forcing the clocks too high for the whole system, we only allow 362 * each client to waitboost once in a busy period. 363 */ 364 if (rps) { 365 if (INTEL_GEN(rq->i915) >= 6) 366 gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies); 367 else 368 rps = NULL; 369 } 370 371 timeout = i915_wait_request(rq, flags, timeout); 372 373 out: 374 if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq)) 375 i915_gem_request_retire_upto(rq); 376 377 if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) { 378 /* The GPU is now idle and this client has stalled. 379 * Since no other client has submitted a request in the 380 * meantime, assume that this client is the only one 381 * supplying work to the GPU but is unable to keep that 382 * work supplied because it is waiting. Since the GPU is 383 * then never kept fully busy, RPS autoclocking will 384 * keep the clocks relatively low, causing further delays. 385 * Compensate by giving the synchronous client credit for 386 * a waitboost next time. 387 */ 388 spin_lock(&rq->i915->rps.client_lock); 389 list_del_init(&rps->link); 390 spin_unlock(&rq->i915->rps.client_lock); 391 } 392 393 return timeout; 394 } 395 396 static long 397 i915_gem_object_wait_reservation(struct reservation_object *resv, 398 unsigned int flags, 399 long timeout, 400 struct intel_rps_client *rps) 401 { 402 struct dma_fence *excl; 403 404 if (flags & I915_WAIT_ALL) { 405 struct dma_fence **shared; 406 unsigned int count, i; 407 int ret; 408 409 ret = reservation_object_get_fences_rcu(resv, 410 &excl, &count, &shared); 411 if (ret) 412 return ret; 413 414 for (i = 0; i < count; i++) { 415 timeout = i915_gem_object_wait_fence(shared[i], 416 flags, timeout, 417 rps); 418 if (timeout <= 0) 419 break; 420 421 dma_fence_put(shared[i]); 422 } 423 424 for (; i < count; i++) 425 dma_fence_put(shared[i]); 426 kfree(shared); 427 } else { 428 excl = reservation_object_get_excl_rcu(resv); 429 } 430 431 if (excl && timeout > 0) 432 timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); 433 434 dma_fence_put(excl); 435 436 return timeout; 437 } 438 439 static void __fence_set_priority(struct dma_fence *fence, int prio) 440 { 441 struct drm_i915_gem_request *rq; 442 struct intel_engine_cs *engine; 443 444 if (!dma_fence_is_i915(fence)) 445 return; 446 447 rq = to_request(fence); 448 engine = rq->engine; 449 if (!engine->schedule) 450 return; 451 452 engine->schedule(rq, prio); 453 } 454 455 static void fence_set_priority(struct dma_fence *fence, int prio) 456 { 457 /* Recurse once into a fence-array */ 458 if (dma_fence_is_array(fence)) { 459 struct dma_fence_array *array = to_dma_fence_array(fence); 460 int i; 461 462 for (i = 0; i < array->num_fences; i++) 463 __fence_set_priority(array->fences[i], prio); 464 } else { 465 __fence_set_priority(fence, prio); 466 } 467 } 468 469 int 470 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 471 unsigned int flags, 472 int prio) 473 { 474 struct dma_fence *excl; 475 476 if (flags & I915_WAIT_ALL) { 477 struct dma_fence **shared; 478 unsigned int count, i; 479 int ret; 480 481 ret = reservation_object_get_fences_rcu(obj->resv, 482 &excl, &count, &shared); 483 if (ret) 484 return ret; 485 486 for (i = 0; i < count; i++) { 487 fence_set_priority(shared[i], prio); 488 dma_fence_put(shared[i]); 489 } 490 491 kfree(shared); 492 } else { 493 excl = reservation_object_get_excl_rcu(obj->resv); 494 } 495 496 if (excl) { 497 fence_set_priority(excl, prio); 498 dma_fence_put(excl); 499 } 500 
return 0; 501 } 502 503 /** 504 * Waits for rendering to the object to be completed 505 * @obj: i915 gem object 506 * @flags: how to wait (under a lock, for all rendering or just for writes etc) 507 * @timeout: how long to wait 508 * @rps: client (user process) to charge for any waitboosting 509 */ 510 int 511 i915_gem_object_wait(struct drm_i915_gem_object *obj, 512 unsigned int flags, 513 long timeout, 514 struct intel_rps_client *rps) 515 { 516 might_sleep(); 517 #if IS_ENABLED(CONFIG_LOCKDEP) 518 GEM_BUG_ON(debug_locks && 519 !!lockdep_is_held(&obj->base.dev->struct_mutex) != 520 !!(flags & I915_WAIT_LOCKED)); 521 #endif 522 GEM_BUG_ON(timeout < 0); 523 524 timeout = i915_gem_object_wait_reservation(obj->resv, 525 flags, timeout, 526 rps); 527 return timeout < 0 ? timeout : 0; 528 } 529 530 static struct intel_rps_client *to_rps_client(struct drm_file *file) 531 { 532 struct drm_i915_file_private *fpriv = file->driver_priv; 533 534 return &fpriv->rps; 535 } 536 537 int 538 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 539 int align) 540 { 541 drm_dma_handle_t *phys; 542 int ret; 543 544 if (obj->phys_handle) { 545 if ((unsigned long)obj->phys_handle->vaddr & (align -1)) 546 return -EBUSY; 547 548 return 0; 549 } 550 551 if (obj->mm.madv != I915_MADV_WILLNEED) 552 return -EFAULT; 553 554 if (obj->base.filp == NULL) 555 return -EINVAL; 556 557 ret = i915_gem_object_unbind(obj); 558 if (ret) 559 return ret; 560 561 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 562 if (obj->mm.pages) 563 return -EBUSY; 564 565 /* create a new object */ 566 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); 567 if (!phys) 568 return -ENOMEM; 569 570 obj->phys_handle = phys; 571 obj->ops = &i915_gem_phys_ops; 572 573 return i915_gem_object_pin_pages(obj); 574 } 575 576 static int 577 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, 578 struct drm_i915_gem_pwrite *args, 579 struct drm_file *file) 580 { 581 struct drm_device *dev = obj->base.dev; 582 void *vaddr = obj->phys_handle->vaddr + args->offset; 583 char __user *user_data = u64_to_user_ptr(args->data_ptr); 584 int ret; 585 586 /* We manually control the domain here and pretend that it 587 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 588 */ 589 lockdep_assert_held(&obj->base.dev->struct_mutex); 590 ret = i915_gem_object_wait(obj, 591 I915_WAIT_INTERRUPTIBLE | 592 I915_WAIT_LOCKED | 593 I915_WAIT_ALL, 594 MAX_SCHEDULE_TIMEOUT, 595 to_rps_client(file)); 596 if (ret) 597 return ret; 598 599 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 600 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 601 unsigned long unwritten; 602 603 /* The physical object once assigned is fixed for the lifetime 604 * of the obj, so we can safely drop the lock and continue 605 * to access vaddr. 
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

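/*
 * A minimal userspace sketch of the dumb-buffer path above (illustrative
 * only; plain ioctl(), no error handling, arbitrary local names). The
 * pitch/size computed by i915_gem_dumb_create() come back through the
 * same struct:
 *
 *	struct drm_mode_create_dumb arg = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
 *
 * With these numbers the code above yields
 *	pitch = ALIGN(1024 * DIV_ROUND_UP(32, 8), 64) = 4096 bytes,
 *	size  = 4096 * 768 = 3145728 bytes (768 pages, already page aligned).
 */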
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(to_i915(dev));

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/* The swizzled copy helpers flip bit 6 of the object offset, i.e. swap the
 * two 64-byte halves of every 128-byte chunk. Callers apply them only to
 * pages whose physical address has bit 17 set, matching the bit 6/bit 17
 * address swizzling used by the memory controller on the affected platforms.
 */
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
781 */ 782 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 783 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev, 784 obj->cache_level); 785 786 if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) { 787 ret = i915_gem_object_set_to_cpu_domain(obj, false); 788 if (ret) 789 goto err_unpin; 790 791 *needs_clflush = 0; 792 } 793 794 /* return with the pages pinned */ 795 return 0; 796 797 err_unpin: 798 i915_gem_object_unpin_pages(obj); 799 return ret; 800 } 801 802 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 803 unsigned int *needs_clflush) 804 { 805 int ret; 806 807 lockdep_assert_held(&obj->base.dev->struct_mutex); 808 809 *needs_clflush = 0; 810 if (!i915_gem_object_has_struct_page(obj)) 811 return -ENODEV; 812 813 ret = i915_gem_object_wait(obj, 814 I915_WAIT_INTERRUPTIBLE | 815 I915_WAIT_LOCKED | 816 I915_WAIT_ALL, 817 MAX_SCHEDULE_TIMEOUT, 818 NULL); 819 if (ret) 820 return ret; 821 822 ret = i915_gem_object_pin_pages(obj); 823 if (ret) 824 return ret; 825 826 i915_gem_object_flush_gtt_write_domain(obj); 827 828 /* If we're not in the cpu write domain, set ourself into the 829 * gtt write domain and manually flush cachelines (as required). 830 * This optimizes for the case when the gpu will use the data 831 * right away and we therefore have to clflush anyway. 832 */ 833 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 834 *needs_clflush |= cpu_write_needs_clflush(obj) << 1; 835 836 /* Same trick applies to invalidate partially written cachelines read 837 * before writing. 838 */ 839 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 840 *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev, 841 obj->cache_level); 842 843 if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) { 844 ret = i915_gem_object_set_to_cpu_domain(obj, true); 845 if (ret) 846 goto err_unpin; 847 848 *needs_clflush = 0; 849 } 850 851 if ((*needs_clflush & CLFLUSH_AFTER) == 0) 852 obj->cache_dirty = true; 853 854 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 855 obj->mm.dirty = true; 856 /* return with the pages pinned */ 857 return 0; 858 859 err_unpin: 860 i915_gem_object_unpin_pages(obj); 861 return ret; 862 } 863 864 static void 865 shmem_clflush_swizzled_range(char *addr, unsigned long length, 866 bool swizzled) 867 { 868 if (unlikely(swizzled)) { 869 unsigned long start = (unsigned long) addr; 870 unsigned long end = (unsigned long) addr + length; 871 872 /* For swizzling simply ensure that we always flush both 873 * channels. Lame, but simple and it works. Swizzled 874 * pwrite/pread is far from a hotpath - current userspace 875 * doesn't use it at all. */ 876 start = round_down(start, 128); 877 end = round_up(end, 128); 878 879 drm_clflush_virt_range((void *)start, end - start); 880 } else { 881 drm_clflush_virt_range(addr, length); 882 } 883 884 } 885 886 /* Only difference to the fast-path function is that this can handle bit17 887 * and uses non-atomic copy and kmap functions. */ 888 static int 889 shmem_pread_slow(struct page *page, int offset, int length, 890 char __user *user_data, 891 bool page_do_bit17_swizzling, bool needs_clflush) 892 { 893 char *vaddr; 894 int ret; 895 896 vaddr = kmap(page); 897 if (needs_clflush) 898 shmem_clflush_swizzled_range(vaddr + offset, length, 899 page_do_bit17_swizzling); 900 901 if (page_do_bit17_swizzling) 902 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length); 903 else 904 ret = __copy_to_user(user_data, vaddr + offset, length); 905 kunmap(page); 906 907 return ret ? 
- EFAULT : 0; 908 } 909 910 static int 911 shmem_pread(struct page *page, int offset, int length, char __user *user_data, 912 bool page_do_bit17_swizzling, bool needs_clflush) 913 { 914 int ret; 915 916 ret = -ENODEV; 917 if (!page_do_bit17_swizzling) { 918 char *vaddr = kmap_atomic(page); 919 920 if (needs_clflush) 921 drm_clflush_virt_range(vaddr + offset, length); 922 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length); 923 kunmap_atomic(vaddr); 924 } 925 if (ret == 0) 926 return 0; 927 928 return shmem_pread_slow(page, offset, length, user_data, 929 page_do_bit17_swizzling, needs_clflush); 930 } 931 932 static int 933 i915_gem_shmem_pread(struct drm_i915_gem_object *obj, 934 struct drm_i915_gem_pread *args) 935 { 936 char __user *user_data; 937 u64 remain; 938 unsigned int obj_do_bit17_swizzling; 939 unsigned int needs_clflush; 940 unsigned int idx, offset; 941 int ret; 942 943 obj_do_bit17_swizzling = 0; 944 if (i915_gem_object_needs_bit17_swizzle(obj)) 945 obj_do_bit17_swizzling = BIT(17); 946 947 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex); 948 if (ret) 949 return ret; 950 951 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); 952 mutex_unlock(&obj->base.dev->struct_mutex); 953 if (ret) 954 return ret; 955 956 remain = args->size; 957 user_data = u64_to_user_ptr(args->data_ptr); 958 offset = offset_in_page(args->offset); 959 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 960 struct page *page = i915_gem_object_get_page(obj, idx); 961 int length; 962 963 length = remain; 964 if (offset + length > PAGE_SIZE) 965 length = PAGE_SIZE - offset; 966 967 ret = shmem_pread(page, offset, length, user_data, 968 page_to_phys(page) & obj_do_bit17_swizzling, 969 needs_clflush); 970 if (ret) 971 break; 972 973 remain -= length; 974 user_data += length; 975 offset = 0; 976 } 977 978 i915_gem_obj_finish_shmem_access(obj); 979 return ret; 980 } 981 982 static inline bool 983 gtt_user_read(struct io_mapping *mapping, 984 loff_t base, int offset, 985 char __user *user_data, int length) 986 { 987 void *vaddr; 988 unsigned long unwritten; 989 990 /* We can use the cpu mem copy function because this is X86. 
*/ 991 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); 992 unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length); 993 io_mapping_unmap_atomic(vaddr); 994 if (unwritten) { 995 vaddr = (void __force *) 996 io_mapping_map_wc(mapping, base, PAGE_SIZE); 997 unwritten = copy_to_user(user_data, vaddr + offset, length); 998 io_mapping_unmap(vaddr); 999 } 1000 return unwritten; 1001 } 1002 1003 static int 1004 i915_gem_gtt_pread(struct drm_i915_gem_object *obj, 1005 const struct drm_i915_gem_pread *args) 1006 { 1007 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1008 struct i915_ggtt *ggtt = &i915->ggtt; 1009 struct drm_mm_node node; 1010 struct i915_vma *vma; 1011 void __user *user_data; 1012 u64 remain, offset; 1013 int ret; 1014 1015 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1016 if (ret) 1017 return ret; 1018 1019 intel_runtime_pm_get(i915); 1020 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1021 PIN_MAPPABLE | PIN_NONBLOCK); 1022 if (!IS_ERR(vma)) { 1023 node.start = i915_ggtt_offset(vma); 1024 node.allocated = false; 1025 ret = i915_vma_put_fence(vma); 1026 if (ret) { 1027 i915_vma_unpin(vma); 1028 vma = ERR_PTR(ret); 1029 } 1030 } 1031 if (IS_ERR(vma)) { 1032 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1033 if (ret) 1034 goto out_unlock; 1035 GEM_BUG_ON(!node.allocated); 1036 } 1037 1038 ret = i915_gem_object_set_to_gtt_domain(obj, false); 1039 if (ret) 1040 goto out_unpin; 1041 1042 mutex_unlock(&i915->drm.struct_mutex); 1043 1044 user_data = u64_to_user_ptr(args->data_ptr); 1045 remain = args->size; 1046 offset = args->offset; 1047 1048 while (remain > 0) { 1049 /* Operation in this page 1050 * 1051 * page_base = page offset within aperture 1052 * page_offset = offset within page 1053 * page_length = bytes to copy for this page 1054 */ 1055 u32 page_base = node.start; 1056 unsigned page_offset = offset_in_page(offset); 1057 unsigned page_length = PAGE_SIZE - page_offset; 1058 page_length = remain < page_length ? remain : page_length; 1059 if (node.allocated) { 1060 wmb(); 1061 ggtt->base.insert_page(&ggtt->base, 1062 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1063 node.start, I915_CACHE_NONE, 0); 1064 wmb(); 1065 } else { 1066 page_base += offset & PAGE_MASK; 1067 } 1068 1069 if (gtt_user_read(&ggtt->mappable, page_base, page_offset, 1070 user_data, page_length)) { 1071 ret = -EFAULT; 1072 break; 1073 } 1074 1075 remain -= page_length; 1076 user_data += page_length; 1077 offset += page_length; 1078 } 1079 1080 mutex_lock(&i915->drm.struct_mutex); 1081 out_unpin: 1082 if (node.allocated) { 1083 wmb(); 1084 ggtt->base.clear_range(&ggtt->base, 1085 node.start, node.size); 1086 remove_mappable_node(&node); 1087 } else { 1088 i915_vma_unpin(vma); 1089 } 1090 out_unlock: 1091 intel_runtime_pm_put(i915); 1092 mutex_unlock(&i915->drm.struct_mutex); 1093 1094 return ret; 1095 } 1096 1097 /** 1098 * Reads data from the object referenced by handle. 1099 * @dev: drm device pointer 1100 * @data: ioctl data blob 1101 * @file: drm file pointer 1102 * 1103 * On error, the contents of *data are undefined. 
1104 */ 1105 int 1106 i915_gem_pread_ioctl(struct drm_device *dev, void *data, 1107 struct drm_file *file) 1108 { 1109 struct drm_i915_gem_pread *args = data; 1110 struct drm_i915_gem_object *obj; 1111 int ret; 1112 1113 if (args->size == 0) 1114 return 0; 1115 1116 if (!access_ok(VERIFY_WRITE, 1117 u64_to_user_ptr(args->data_ptr), 1118 args->size)) 1119 return -EFAULT; 1120 1121 obj = i915_gem_object_lookup(file, args->handle); 1122 if (!obj) 1123 return -ENOENT; 1124 1125 /* Bounds check source. */ 1126 if (args->offset > obj->base.size || 1127 args->size > obj->base.size - args->offset) { 1128 ret = -EINVAL; 1129 goto out; 1130 } 1131 1132 trace_i915_gem_object_pread(obj, args->offset, args->size); 1133 1134 ret = i915_gem_object_wait(obj, 1135 I915_WAIT_INTERRUPTIBLE, 1136 MAX_SCHEDULE_TIMEOUT, 1137 to_rps_client(file)); 1138 if (ret) 1139 goto out; 1140 1141 ret = i915_gem_object_pin_pages(obj); 1142 if (ret) 1143 goto out; 1144 1145 ret = i915_gem_shmem_pread(obj, args); 1146 if (ret == -EFAULT || ret == -ENODEV) 1147 ret = i915_gem_gtt_pread(obj, args); 1148 1149 i915_gem_object_unpin_pages(obj); 1150 out: 1151 i915_gem_object_put(obj); 1152 return ret; 1153 } 1154 1155 /* This is the fast write path which cannot handle 1156 * page faults in the source data 1157 */ 1158 1159 static inline bool 1160 ggtt_write(struct io_mapping *mapping, 1161 loff_t base, int offset, 1162 char __user *user_data, int length) 1163 { 1164 void *vaddr; 1165 unsigned long unwritten; 1166 1167 /* We can use the cpu mem copy function because this is X86. */ 1168 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); 1169 unwritten = __copy_from_user_inatomic_nocache(vaddr + offset, 1170 user_data, length); 1171 io_mapping_unmap_atomic(vaddr); 1172 if (unwritten) { 1173 vaddr = (void __force *) 1174 io_mapping_map_wc(mapping, base, PAGE_SIZE); 1175 unwritten = copy_from_user(vaddr + offset, user_data, length); 1176 io_mapping_unmap(vaddr); 1177 } 1178 1179 return unwritten; 1180 } 1181 1182 /** 1183 * This is the fast pwrite path, where we copy the data directly from the 1184 * user into the GTT, uncached. 
1185 * @obj: i915 GEM object 1186 * @args: pwrite arguments structure 1187 */ 1188 static int 1189 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, 1190 const struct drm_i915_gem_pwrite *args) 1191 { 1192 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1193 struct i915_ggtt *ggtt = &i915->ggtt; 1194 struct drm_mm_node node; 1195 struct i915_vma *vma; 1196 u64 remain, offset; 1197 void __user *user_data; 1198 int ret; 1199 1200 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1201 if (ret) 1202 return ret; 1203 1204 intel_runtime_pm_get(i915); 1205 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1206 PIN_MAPPABLE | PIN_NONBLOCK); 1207 if (!IS_ERR(vma)) { 1208 node.start = i915_ggtt_offset(vma); 1209 node.allocated = false; 1210 ret = i915_vma_put_fence(vma); 1211 if (ret) { 1212 i915_vma_unpin(vma); 1213 vma = ERR_PTR(ret); 1214 } 1215 } 1216 if (IS_ERR(vma)) { 1217 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1218 if (ret) 1219 goto out_unlock; 1220 GEM_BUG_ON(!node.allocated); 1221 } 1222 1223 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1224 if (ret) 1225 goto out_unpin; 1226 1227 mutex_unlock(&i915->drm.struct_mutex); 1228 1229 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 1230 1231 user_data = u64_to_user_ptr(args->data_ptr); 1232 offset = args->offset; 1233 remain = args->size; 1234 while (remain) { 1235 /* Operation in this page 1236 * 1237 * page_base = page offset within aperture 1238 * page_offset = offset within page 1239 * page_length = bytes to copy for this page 1240 */ 1241 u32 page_base = node.start; 1242 unsigned int page_offset = offset_in_page(offset); 1243 unsigned int page_length = PAGE_SIZE - page_offset; 1244 page_length = remain < page_length ? remain : page_length; 1245 if (node.allocated) { 1246 wmb(); /* flush the write before we modify the GGTT */ 1247 ggtt->base.insert_page(&ggtt->base, 1248 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1249 node.start, I915_CACHE_NONE, 0); 1250 wmb(); /* flush modifications to the GGTT (insert_page) */ 1251 } else { 1252 page_base += offset & PAGE_MASK; 1253 } 1254 /* If we get a fault while copying data, then (presumably) our 1255 * source page isn't available. Return the error and we'll 1256 * retry in the slow path. 1257 * If the object is non-shmem backed, we retry again with the 1258 * path that handles page fault. 
1259 */ 1260 if (ggtt_write(&ggtt->mappable, page_base, page_offset, 1261 user_data, page_length)) { 1262 ret = -EFAULT; 1263 break; 1264 } 1265 1266 remain -= page_length; 1267 user_data += page_length; 1268 offset += page_length; 1269 } 1270 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 1271 1272 mutex_lock(&i915->drm.struct_mutex); 1273 out_unpin: 1274 if (node.allocated) { 1275 wmb(); 1276 ggtt->base.clear_range(&ggtt->base, 1277 node.start, node.size); 1278 remove_mappable_node(&node); 1279 } else { 1280 i915_vma_unpin(vma); 1281 } 1282 out_unlock: 1283 intel_runtime_pm_put(i915); 1284 mutex_unlock(&i915->drm.struct_mutex); 1285 return ret; 1286 } 1287 1288 static int 1289 shmem_pwrite_slow(struct page *page, int offset, int length, 1290 char __user *user_data, 1291 bool page_do_bit17_swizzling, 1292 bool needs_clflush_before, 1293 bool needs_clflush_after) 1294 { 1295 char *vaddr; 1296 int ret; 1297 1298 vaddr = kmap(page); 1299 if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) 1300 shmem_clflush_swizzled_range(vaddr + offset, length, 1301 page_do_bit17_swizzling); 1302 if (page_do_bit17_swizzling) 1303 ret = __copy_from_user_swizzled(vaddr, offset, user_data, 1304 length); 1305 else 1306 ret = __copy_from_user(vaddr + offset, user_data, length); 1307 if (needs_clflush_after) 1308 shmem_clflush_swizzled_range(vaddr + offset, length, 1309 page_do_bit17_swizzling); 1310 kunmap(page); 1311 1312 return ret ? -EFAULT : 0; 1313 } 1314 1315 /* Per-page copy function for the shmem pwrite fastpath. 1316 * Flushes invalid cachelines before writing to the target if 1317 * needs_clflush_before is set and flushes out any written cachelines after 1318 * writing if needs_clflush is set. 1319 */ 1320 static int 1321 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, 1322 bool page_do_bit17_swizzling, 1323 bool needs_clflush_before, 1324 bool needs_clflush_after) 1325 { 1326 int ret; 1327 1328 ret = -ENODEV; 1329 if (!page_do_bit17_swizzling) { 1330 char *vaddr = kmap_atomic(page); 1331 1332 if (needs_clflush_before) 1333 drm_clflush_virt_range(vaddr + offset, len); 1334 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len); 1335 if (needs_clflush_after) 1336 drm_clflush_virt_range(vaddr + offset, len); 1337 1338 kunmap_atomic(vaddr); 1339 } 1340 if (ret == 0) 1341 return ret; 1342 1343 return shmem_pwrite_slow(page, offset, len, user_data, 1344 page_do_bit17_swizzling, 1345 needs_clflush_before, 1346 needs_clflush_after); 1347 } 1348 1349 static int 1350 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, 1351 const struct drm_i915_gem_pwrite *args) 1352 { 1353 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1354 void __user *user_data; 1355 u64 remain; 1356 unsigned int obj_do_bit17_swizzling; 1357 unsigned int partial_cacheline_write; 1358 unsigned int needs_clflush; 1359 unsigned int offset, idx; 1360 int ret; 1361 1362 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1363 if (ret) 1364 return ret; 1365 1366 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); 1367 mutex_unlock(&i915->drm.struct_mutex); 1368 if (ret) 1369 return ret; 1370 1371 obj_do_bit17_swizzling = 0; 1372 if (i915_gem_object_needs_bit17_swizzle(obj)) 1373 obj_do_bit17_swizzling = BIT(17); 1374 1375 /* If we don't overwrite a cacheline completely we need to be 1376 * careful to have up-to-date data by first clflushing. Don't 1377 * overcomplicate things and flush the entire patch. 
1378 */ 1379 partial_cacheline_write = 0; 1380 if (needs_clflush & CLFLUSH_BEFORE) 1381 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1; 1382 1383 user_data = u64_to_user_ptr(args->data_ptr); 1384 remain = args->size; 1385 offset = offset_in_page(args->offset); 1386 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 1387 struct page *page = i915_gem_object_get_page(obj, idx); 1388 int length; 1389 1390 length = remain; 1391 if (offset + length > PAGE_SIZE) 1392 length = PAGE_SIZE - offset; 1393 1394 ret = shmem_pwrite(page, offset, length, user_data, 1395 page_to_phys(page) & obj_do_bit17_swizzling, 1396 (offset | length) & partial_cacheline_write, 1397 needs_clflush & CLFLUSH_AFTER); 1398 if (ret) 1399 break; 1400 1401 remain -= length; 1402 user_data += length; 1403 offset = 0; 1404 } 1405 1406 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 1407 i915_gem_obj_finish_shmem_access(obj); 1408 return ret; 1409 } 1410 1411 /** 1412 * Writes data to the object referenced by handle. 1413 * @dev: drm device 1414 * @data: ioctl data blob 1415 * @file: drm file 1416 * 1417 * On error, the contents of the buffer that were to be modified are undefined. 1418 */ 1419 int 1420 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1421 struct drm_file *file) 1422 { 1423 struct drm_i915_gem_pwrite *args = data; 1424 struct drm_i915_gem_object *obj; 1425 int ret; 1426 1427 if (args->size == 0) 1428 return 0; 1429 1430 if (!access_ok(VERIFY_READ, 1431 u64_to_user_ptr(args->data_ptr), 1432 args->size)) 1433 return -EFAULT; 1434 1435 obj = i915_gem_object_lookup(file, args->handle); 1436 if (!obj) 1437 return -ENOENT; 1438 1439 /* Bounds check destination. */ 1440 if (args->offset > obj->base.size || 1441 args->size > obj->base.size - args->offset) { 1442 ret = -EINVAL; 1443 goto err; 1444 } 1445 1446 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1447 1448 ret = i915_gem_object_wait(obj, 1449 I915_WAIT_INTERRUPTIBLE | 1450 I915_WAIT_ALL, 1451 MAX_SCHEDULE_TIMEOUT, 1452 to_rps_client(file)); 1453 if (ret) 1454 goto err; 1455 1456 ret = i915_gem_object_pin_pages(obj); 1457 if (ret) 1458 goto err; 1459 1460 ret = -EFAULT; 1461 /* We can only do the GTT pwrite on untiled buffers, as otherwise 1462 * it would end up going through the fenced access, and we'll get 1463 * different detiling behavior between reading and writing. 1464 * pread/pwrite currently are reading and writing from the CPU 1465 * perspective, requiring manual detiling by the client. 1466 */ 1467 if (!i915_gem_object_has_struct_page(obj) || 1468 cpu_write_needs_clflush(obj)) 1469 /* Note that the gtt paths might fail with non-page-backed user 1470 * pointers (e.g. gtt mappings when moving data between 1471 * textures). Fallback to the shmem path in that case. 1472 */ 1473 ret = i915_gem_gtt_pwrite_fast(obj, args); 1474 1475 if (ret == -EFAULT || ret == -ENOSPC) { 1476 if (obj->phys_handle) 1477 ret = i915_gem_phys_pwrite(obj, args, file); 1478 else 1479 ret = i915_gem_shmem_pwrite(obj, args); 1480 } 1481 1482 i915_gem_object_unpin_pages(obj); 1483 err: 1484 i915_gem_object_put(obj); 1485 return ret; 1486 } 1487 1488 static inline enum fb_op_origin 1489 write_origin(struct drm_i915_gem_object *obj, unsigned domain) 1490 { 1491 return (domain == I915_GEM_DOMAIN_GTT ? 
1492 obj->frontbuffer_ggtt_origin : ORIGIN_CPU); 1493 } 1494 1495 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) 1496 { 1497 struct drm_i915_private *i915; 1498 struct list_head *list; 1499 struct i915_vma *vma; 1500 1501 list_for_each_entry(vma, &obj->vma_list, obj_link) { 1502 if (!i915_vma_is_ggtt(vma)) 1503 continue; 1504 1505 if (i915_vma_is_active(vma)) 1506 continue; 1507 1508 if (!drm_mm_node_allocated(&vma->node)) 1509 continue; 1510 1511 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 1512 } 1513 1514 i915 = to_i915(obj->base.dev); 1515 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; 1516 list_move_tail(&obj->global_link, list); 1517 } 1518 1519 /** 1520 * Called when user space prepares to use an object with the CPU, either 1521 * through the mmap ioctl's mapping or a GTT mapping. 1522 * @dev: drm device 1523 * @data: ioctl data blob 1524 * @file: drm file 1525 */ 1526 int 1527 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1528 struct drm_file *file) 1529 { 1530 struct drm_i915_gem_set_domain *args = data; 1531 struct drm_i915_gem_object *obj; 1532 uint32_t read_domains = args->read_domains; 1533 uint32_t write_domain = args->write_domain; 1534 int err; 1535 1536 /* Only handle setting domains to types used by the CPU. */ 1537 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) 1538 return -EINVAL; 1539 1540 /* Having something in the write domain implies it's in the read 1541 * domain, and only that read domain. Enforce that in the request. 1542 */ 1543 if (write_domain != 0 && read_domains != write_domain) 1544 return -EINVAL; 1545 1546 obj = i915_gem_object_lookup(file, args->handle); 1547 if (!obj) 1548 return -ENOENT; 1549 1550 /* Try to flush the object off the GPU without holding the lock. 1551 * We will repeat the flush holding the lock in the normal manner 1552 * to catch cases where we are gazumped. 1553 */ 1554 err = i915_gem_object_wait(obj, 1555 I915_WAIT_INTERRUPTIBLE | 1556 (write_domain ? I915_WAIT_ALL : 0), 1557 MAX_SCHEDULE_TIMEOUT, 1558 to_rps_client(file)); 1559 if (err) 1560 goto out; 1561 1562 /* Flush and acquire obj->pages so that we are coherent through 1563 * direct access in memory with previous cached writes through 1564 * shmemfs and that our cache domain tracking remains valid. 1565 * For example, if the obj->filp was moved to swap without us 1566 * being notified and releasing the pages, we would mistakenly 1567 * continue to assume that the obj remained out of the CPU cached 1568 * domain. 
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out_unpin;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);

	mutex_unlock(&dev->struct_mutex);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Pinned buffers may be scanout, so flush the cache */
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	i915_gem_object_put(obj);
	return err;
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 * it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
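/*
 * The "modern way" recommended in the comment above, sketched purely for
 * illustration against the uAPI in include/uapi/drm/i915_drm.h (no error
 * handling, arbitrary local names): userspace asks for the fake mmap
 * offset of a GEM handle and then calls mmap() on the DRM fd itself, so
 * tools like valgrind see an ordinary mapping:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */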

static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	u64 size;

	size = i915_gem_object_get_stride(obj);
	size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;

	return size >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}
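/*
 * For illustration only (not part of the original flow): userspace can
 * query the value returned above through the standard GETPARAM ioctl;
 * local names are arbitrary and older kernels may not know the parameter:
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("GTT mmap version %d\n", version);
 */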

/**
 * i915_gem_fault - fault a page into the GTT
 * @area: CPU VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vmas, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		struct i915_ggtt_view view;
		unsigned int chunk_size;

		/* Use a partial view if it is bigger than available space */
		chunk_size = MIN_CHUNK_PAGES;
		if (i915_gem_object_is_tiled(obj))
			chunk_size = roundup(chunk_size, tile_row_pages(obj));

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int, chunk_size,
			      vma_pages(area) - view.params.partial.offset);

		/* If the partial covers the entire object, just create a
		 * normal VMA.
		 */
		if (chunk_size >= obj->base.size >> PAGE_SHIFT)
			view.type = I915_GGTT_VIEW_NORMAL;

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_get_fence(vma);
	if (ret)
		goto err_unpin;

	/* Mark as being mmapped into userspace for later revocation */
	assert_rpm_wakelock_held(dev_priv);
	if (list_empty(&obj->userfault_link))
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);

err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
	i915_gem_object_unpin_pages(obj);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}
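/*
 * A worked example of the partial-view sizing in i915_gem_fault() above
 * (numbers chosen purely for illustration): with 4 KiB pages,
 * MIN_CHUNK_PAGES is (1 << 20) >> 12 = 256 pages. For a Y-tiled object
 * with a 4096-byte stride, tile_row_pages() returns 4096 * 32 >> 12 = 32
 * pages, so chunk_size = roundup(256, 32) stays at 256. A fault at
 * page_offset 1000 then maps the 1 MiB chunk starting at page
 * rounddown(1000, 256) = 768, unless that chunk would cover the whole
 * object, in which case a normal view is used instead.
 */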

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (list_empty(&obj->userfault_link))
		goto out;

	list_del_init(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	intel_runtime_pm_put(i915);
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link) {
		list_del_init(&obj->userfault_link);
		drm_vma_node_unmap(&obj->base.vma_node,
				   obj->base.dev->anon_inode->i_mapping);
	}

	/* The fences will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		if (WARN_ON(reg->pin_count))
			continue;

		if (!reg->vma)
			continue;

		GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
		reg->dirty = true;
	}
}

/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}

/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}
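/*
 * Worked examples for the two helpers above (values chosen purely for
 * illustration): on gen3, a 720 KiB X-tiled object starts from the 1 MiB
 * minimum, which already covers it, so both the GGTT size and the fenced
 * alignment are 1 MiB; a 1.5 MiB tiled object doubles once to 2 MiB. On
 * gen2 the minimum is 512 KiB, so the 720 KiB object is rounded up to
 * 1 MiB. From gen4 onwards (or for untiled objects) the object size and a
 * 4096-byte alignment are used unchanged.
 */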
2018 */ 2019 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2020 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2021 2022 if (WARN_ON(reg->pin_count)) 2023 continue; 2024 2025 if (!reg->vma) 2026 continue; 2027 2028 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link)); 2029 reg->dirty = true; 2030 } 2031 } 2032 2033 /** 2034 * i915_gem_get_ggtt_size - return required global GTT size for an object 2035 * @dev_priv: i915 device 2036 * @size: object size 2037 * @tiling_mode: tiling mode 2038 * 2039 * Return the required global GTT size for an object, taking into account 2040 * potential fence register mapping. 2041 */ 2042 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, 2043 u64 size, int tiling_mode) 2044 { 2045 u64 ggtt_size; 2046 2047 GEM_BUG_ON(size == 0); 2048 2049 if (INTEL_GEN(dev_priv) >= 4 || 2050 tiling_mode == I915_TILING_NONE) 2051 return size; 2052 2053 /* Previous chips need a power-of-two fence region when tiling */ 2054 if (IS_GEN3(dev_priv)) 2055 ggtt_size = 1024*1024; 2056 else 2057 ggtt_size = 512*1024; 2058 2059 while (ggtt_size < size) 2060 ggtt_size <<= 1; 2061 2062 return ggtt_size; 2063 } 2064 2065 /** 2066 * i915_gem_get_ggtt_alignment - return required global GTT alignment 2067 * @dev_priv: i915 device 2068 * @size: object size 2069 * @tiling_mode: tiling mode 2070 * @fenced: is fenced alignment required or not 2071 * 2072 * Return the required global GTT alignment for an object, taking into account 2073 * potential fence register mapping. 2074 */ 2075 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, 2076 int tiling_mode, bool fenced) 2077 { 2078 GEM_BUG_ON(size == 0); 2079 2080 /* 2081 * Minimum alignment is 4k (GTT page size), but might be greater 2082 * if a fence register is needed for the object. 2083 */ 2084 if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) || 2085 tiling_mode == I915_TILING_NONE) 2086 return 4096; 2087 2088 /* 2089 * Previous chips need to be aligned to the size of the smallest 2090 * fence register that can contain the object. 2091 */ 2092 return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode); 2093 } 2094 2095 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 2096 { 2097 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2098 int err; 2099 2100 err = drm_gem_create_mmap_offset(&obj->base); 2101 if (!err) 2102 return 0; 2103 2104 /* We can idle the GPU locklessly to flush stale objects, but in order 2105 * to claim that space for ourselves, we need to take the big 2106 * struct_mutex to free the requests+objects and allocate our slot.
2107 */ 2108 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); 2109 if (err) 2110 return err; 2111 2112 err = i915_mutex_lock_interruptible(&dev_priv->drm); 2113 if (!err) { 2114 i915_gem_retire_requests(dev_priv); 2115 err = drm_gem_create_mmap_offset(&obj->base); 2116 mutex_unlock(&dev_priv->drm.struct_mutex); 2117 } 2118 2119 return err; 2120 } 2121 2122 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 2123 { 2124 drm_gem_free_mmap_offset(&obj->base); 2125 } 2126 2127 int 2128 i915_gem_mmap_gtt(struct drm_file *file, 2129 struct drm_device *dev, 2130 uint32_t handle, 2131 uint64_t *offset) 2132 { 2133 struct drm_i915_gem_object *obj; 2134 int ret; 2135 2136 obj = i915_gem_object_lookup(file, handle); 2137 if (!obj) 2138 return -ENOENT; 2139 2140 ret = i915_gem_object_create_mmap_offset(obj); 2141 if (ret == 0) 2142 *offset = drm_vma_node_offset_addr(&obj->base.vma_node); 2143 2144 i915_gem_object_put(obj); 2145 return ret; 2146 } 2147 2148 /** 2149 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 2150 * @dev: DRM device 2151 * @data: GTT mapping ioctl data 2152 * @file: GEM object info 2153 * 2154 * Simply returns the fake offset to userspace so it can mmap it. 2155 * The mmap call will end up in drm_gem_mmap(), which will set things 2156 * up so we can get faults in the handler above. 2157 * 2158 * The fault handler will take care of binding the object into the GTT 2159 * (since it may have been evicted to make room for something), allocating 2160 * a fence register, and mapping the appropriate aperture address into 2161 * userspace. 2162 */ 2163 int 2164 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2165 struct drm_file *file) 2166 { 2167 struct drm_i915_gem_mmap_gtt *args = data; 2168 2169 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 2170 } 2171 2172 /* Immediately discard the backing storage */ 2173 static void 2174 i915_gem_object_truncate(struct drm_i915_gem_object *obj) 2175 { 2176 i915_gem_object_free_mmap_offset(obj); 2177 2178 if (obj->base.filp == NULL) 2179 return; 2180 2181 /* Our goal here is to return as much of the memory as 2182 * is possible back to the system as we are called from OOM. 2183 * To do this we must instruct the shmfs to drop all of its 2184 * backing pages, *now*. 
2185 */ 2186 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2187 obj->mm.madv = __I915_MADV_PURGED; 2188 } 2189 2190 /* Try to discard unwanted pages */ 2191 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) 2192 { 2193 struct address_space *mapping; 2194 2195 lockdep_assert_held(&obj->mm.lock); 2196 GEM_BUG_ON(obj->mm.pages); 2197 2198 switch (obj->mm.madv) { 2199 case I915_MADV_DONTNEED: 2200 i915_gem_object_truncate(obj); 2201 case __I915_MADV_PURGED: 2202 return; 2203 } 2204 2205 if (obj->base.filp == NULL) 2206 return; 2207 2208 mapping = obj->base.filp->f_mapping; 2209 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 2210 } 2211 2212 static void 2213 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, 2214 struct sg_table *pages) 2215 { 2216 struct sgt_iter sgt_iter; 2217 struct page *page; 2218 2219 __i915_gem_object_release_shmem(obj, pages); 2220 2221 i915_gem_gtt_finish_pages(obj, pages); 2222 2223 if (i915_gem_object_needs_bit17_swizzle(obj)) 2224 i915_gem_object_save_bit_17_swizzle(obj, pages); 2225 2226 for_each_sgt_page(page, sgt_iter, pages) { 2227 if (obj->mm.dirty) 2228 set_page_dirty(page); 2229 2230 if (obj->mm.madv == I915_MADV_WILLNEED) 2231 mark_page_accessed(page); 2232 2233 put_page(page); 2234 } 2235 obj->mm.dirty = false; 2236 2237 sg_free_table(pages); 2238 kfree(pages); 2239 } 2240 2241 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) 2242 { 2243 struct radix_tree_iter iter; 2244 void **slot; 2245 2246 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) 2247 radix_tree_delete(&obj->mm.get_page.radix, iter.index); 2248 } 2249 2250 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 2251 enum i915_mm_subclass subclass) 2252 { 2253 struct sg_table *pages; 2254 2255 if (i915_gem_object_has_pinned_pages(obj)) 2256 return; 2257 2258 GEM_BUG_ON(obj->bind_count); 2259 if (!READ_ONCE(obj->mm.pages)) 2260 return; 2261 2262 /* May be called by shrinker from within get_pages() (on another bo) */ 2263 mutex_lock_nested(&obj->mm.lock, subclass); 2264 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) 2265 goto unlock; 2266 2267 /* ->put_pages might need to allocate memory for the bit17 swizzle 2268 * array, hence protect them from being reaped by removing them from gtt 2269 * lists early.
*/ 2270 pages = fetch_and_zero(&obj->mm.pages); 2271 GEM_BUG_ON(!pages); 2272 2273 if (obj->mm.mapping) { 2274 void *ptr; 2275 2276 ptr = ptr_mask_bits(obj->mm.mapping); 2277 if (is_vmalloc_addr(ptr)) 2278 vunmap(ptr); 2279 else 2280 kunmap(kmap_to_page(ptr)); 2281 2282 obj->mm.mapping = NULL; 2283 } 2284 2285 __i915_gem_object_reset_page_iter(obj); 2286 2287 obj->ops->put_pages(obj, pages); 2288 unlock: 2289 mutex_unlock(&obj->mm.lock); 2290 } 2291 2292 static unsigned int swiotlb_max_size(void) 2293 { 2294 #if IS_ENABLED(CONFIG_SWIOTLB) 2295 return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE); 2296 #else 2297 return 0; 2298 #endif 2299 } 2300 2301 static void i915_sg_trim(struct sg_table *orig_st) 2302 { 2303 struct sg_table new_st; 2304 struct scatterlist *sg, *new_sg; 2305 unsigned int i; 2306 2307 if (orig_st->nents == orig_st->orig_nents) 2308 return; 2309 2310 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL)) 2311 return; 2312 2313 new_sg = new_st.sgl; 2314 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { 2315 sg_set_page(new_sg, sg_page(sg), sg->length, 0); 2316 /* called before being DMA mapped, no need to copy sg->dma_* */ 2317 new_sg = sg_next(new_sg); 2318 } 2319 2320 sg_free_table(orig_st); 2321 2322 *orig_st = new_st; 2323 } 2324 2325 static struct sg_table * 2326 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2327 { 2328 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2329 int page_count, i; 2330 struct address_space *mapping; 2331 struct sg_table *st; 2332 struct scatterlist *sg; 2333 struct sgt_iter sgt_iter; 2334 struct page *page; 2335 unsigned long last_pfn = 0; /* suppress gcc warning */ 2336 unsigned int max_segment; 2337 int ret; 2338 gfp_t gfp; 2339 2340 /* Assert that the object is not currently in any GPU domain. As it 2341 * wasn't in the GTT, there shouldn't be any way it could have been in 2342 * a GPU cache 2343 */ 2344 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2345 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2346 2347 max_segment = swiotlb_max_size(); 2348 if (!max_segment) 2349 max_segment = rounddown(UINT_MAX, PAGE_SIZE); 2350 2351 st = kmalloc(sizeof(*st), GFP_KERNEL); 2352 if (st == NULL) 2353 return ERR_PTR(-ENOMEM); 2354 2355 page_count = obj->base.size / PAGE_SIZE; 2356 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 2357 kfree(st); 2358 return ERR_PTR(-ENOMEM); 2359 } 2360 2361 /* Get the list of pages out of our struct file. They'll be pinned 2362 * at this point until we release them. 2363 * 2364 * Fail silently without starting the shrinker 2365 */ 2366 mapping = obj->base.filp->f_mapping; 2367 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); 2368 gfp |= __GFP_NORETRY | __GFP_NOWARN; 2369 sg = st->sgl; 2370 st->nents = 0; 2371 for (i = 0; i < page_count; i++) { 2372 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2373 if (IS_ERR(page)) { 2374 i915_gem_shrink(dev_priv, 2375 page_count, 2376 I915_SHRINK_BOUND | 2377 I915_SHRINK_UNBOUND | 2378 I915_SHRINK_PURGEABLE); 2379 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2380 } 2381 if (IS_ERR(page)) { 2382 /* We've tried hard to allocate the memory by reaping 2383 * our own buffer, now let the real VM do its job and 2384 * go down in flames if truly OOM. 
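 * Unlike the masked-gfp attempts above, this final call may enter direct reclaim and block; it is the last resort before we report failure.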
2385 */ 2386 page = shmem_read_mapping_page(mapping, i); 2387 if (IS_ERR(page)) { 2388 ret = PTR_ERR(page); 2389 goto err_sg; 2390 } 2391 } 2392 if (!i || 2393 sg->length >= max_segment || 2394 page_to_pfn(page) != last_pfn + 1) { 2395 if (i) 2396 sg = sg_next(sg); 2397 st->nents++; 2398 sg_set_page(sg, page, PAGE_SIZE, 0); 2399 } else { 2400 sg->length += PAGE_SIZE; 2401 } 2402 last_pfn = page_to_pfn(page); 2403 2404 /* Check that the i965g/gm workaround works. */ 2405 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2406 } 2407 if (sg) /* loop terminated early; short sg table */ 2408 sg_mark_end(sg); 2409 2410 /* Trim unused sg entries to avoid wasting memory. */ 2411 i915_sg_trim(st); 2412 2413 ret = i915_gem_gtt_prepare_pages(obj, st); 2414 if (ret) 2415 goto err_pages; 2416 2417 if (i915_gem_object_needs_bit17_swizzle(obj)) 2418 i915_gem_object_do_bit_17_swizzle(obj, st); 2419 2420 return st; 2421 2422 err_sg: 2423 sg_mark_end(sg); 2424 err_pages: 2425 for_each_sgt_page(page, sgt_iter, st) 2426 put_page(page); 2427 sg_free_table(st); 2428 kfree(st); 2429 2430 /* shmemfs first checks if there is enough memory to allocate the page 2431 * and reports ENOSPC should there be insufficient, along with the usual 2432 * ENOMEM for a genuine allocation failure. 2433 * 2434 * We use ENOSPC in our driver to mean that we have run out of aperture 2435 * space and so want to translate the error from shmemfs back to our 2436 * usual understanding of ENOMEM. 2437 */ 2438 if (ret == -ENOSPC) 2439 ret = -ENOMEM; 2440 2441 return ERR_PTR(ret); 2442 } 2443 2444 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2445 struct sg_table *pages) 2446 { 2447 lockdep_assert_held(&obj->mm.lock); 2448 2449 obj->mm.get_page.sg_pos = pages->sgl; 2450 obj->mm.get_page.sg_idx = 0; 2451 2452 obj->mm.pages = pages; 2453 2454 if (i915_gem_object_is_tiled(obj) && 2455 to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 2456 GEM_BUG_ON(obj->mm.quirked); 2457 __i915_gem_object_pin_pages(obj); 2458 obj->mm.quirked = true; 2459 } 2460 } 2461 2462 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2463 { 2464 struct sg_table *pages; 2465 2466 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); 2467 2468 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { 2469 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2470 return -EFAULT; 2471 } 2472 2473 pages = obj->ops->get_pages(obj); 2474 if (unlikely(IS_ERR(pages))) 2475 return PTR_ERR(pages); 2476 2477 __i915_gem_object_set_pages(obj, pages); 2478 return 0; 2479 } 2480 2481 /* Ensure that the associated pages are gathered from the backing storage 2482 * and pinned into our object. i915_gem_object_pin_pages() may be called 2483 * multiple times before they are released by a single call to 2484 * i915_gem_object_unpin_pages() - once the pages are no longer referenced 2485 * either as a result of memory pressure (reaping pages under the shrinker) 2486 * or as the object is itself released. 
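 * A minimal usage sketch (illustrative only, error handling trimmed): err = i915_gem_object_pin_pages(obj); if (err) return err; ... operate on obj->mm.pages ...; i915_gem_object_unpin_pages(obj);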
2487 */ 2488 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2489 { 2490 int err; 2491 2492 err = mutex_lock_interruptible(&obj->mm.lock); 2493 if (err) 2494 return err; 2495 2496 if (unlikely(!obj->mm.pages)) { 2497 err = ____i915_gem_object_get_pages(obj); 2498 if (err) 2499 goto unlock; 2500 2501 smp_mb__before_atomic(); 2502 } 2503 atomic_inc(&obj->mm.pages_pin_count); 2504 2505 unlock: 2506 mutex_unlock(&obj->mm.lock); 2507 return err; 2508 } 2509 2510 /* The 'mapping' part of i915_gem_object_pin_map() below */ 2511 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, 2512 enum i915_map_type type) 2513 { 2514 unsigned long n_pages = obj->base.size >> PAGE_SHIFT; 2515 struct sg_table *sgt = obj->mm.pages; 2516 struct sgt_iter sgt_iter; 2517 struct page *page; 2518 struct page *stack_pages[32]; 2519 struct page **pages = stack_pages; 2520 unsigned long i = 0; 2521 pgprot_t pgprot; 2522 void *addr; 2523 2524 /* A single page can always be kmapped */ 2525 if (n_pages == 1 && type == I915_MAP_WB) 2526 return kmap(sg_page(sgt->sgl)); 2527 2528 if (n_pages > ARRAY_SIZE(stack_pages)) { 2529 /* Too big for stack -- allocate temporary array instead */ 2530 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY); 2531 if (!pages) 2532 return NULL; 2533 } 2534 2535 for_each_sgt_page(page, sgt_iter, sgt) 2536 pages[i++] = page; 2537 2538 /* Check that we have the expected number of pages */ 2539 GEM_BUG_ON(i != n_pages); 2540 2541 switch (type) { 2542 case I915_MAP_WB: 2543 pgprot = PAGE_KERNEL; 2544 break; 2545 case I915_MAP_WC: 2546 pgprot = pgprot_writecombine(PAGE_KERNEL_IO); 2547 break; 2548 } 2549 addr = vmap(pages, n_pages, 0, pgprot); 2550 2551 if (pages != stack_pages) 2552 drm_free_large(pages); 2553 2554 return addr; 2555 } 2556 2557 /* get, pin, and map the pages of the object into kernel space */ 2558 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 2559 enum i915_map_type type) 2560 { 2561 enum i915_map_type has_type; 2562 bool pinned; 2563 void *ptr; 2564 int ret; 2565 2566 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 2567 2568 ret = mutex_lock_interruptible(&obj->mm.lock); 2569 if (ret) 2570 return ERR_PTR(ret); 2571 2572 pinned = true; 2573 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2574 if (unlikely(!obj->mm.pages)) { 2575 ret = ____i915_gem_object_get_pages(obj); 2576 if (ret) 2577 goto err_unlock; 2578 2579 smp_mb__before_atomic(); 2580 } 2581 atomic_inc(&obj->mm.pages_pin_count); 2582 pinned = false; 2583 } 2584 GEM_BUG_ON(!obj->mm.pages); 2585 2586 ptr = ptr_unpack_bits(obj->mm.mapping, has_type); 2587 if (ptr && has_type != type) { 2588 if (pinned) { 2589 ret = -EBUSY; 2590 goto err_unpin; 2591 } 2592 2593 if (is_vmalloc_addr(ptr)) 2594 vunmap(ptr); 2595 else 2596 kunmap(kmap_to_page(ptr)); 2597 2598 ptr = obj->mm.mapping = NULL; 2599 } 2600 2601 if (!ptr) { 2602 ptr = i915_gem_object_map(obj, type); 2603 if (!ptr) { 2604 ret = -ENOMEM; 2605 goto err_unpin; 2606 } 2607 2608 obj->mm.mapping = ptr_pack_bits(ptr, type); 2609 } 2610 2611 out_unlock: 2612 mutex_unlock(&obj->mm.lock); 2613 return ptr; 2614 2615 err_unpin: 2616 atomic_dec(&obj->mm.pages_pin_count); 2617 err_unlock: 2618 ptr = ERR_PTR(ret); 2619 goto out_unlock; 2620 } 2621 2622 static bool i915_context_is_banned(const struct i915_gem_context *ctx) 2623 { 2624 unsigned long elapsed; 2625 2626 if (ctx->hang_stats.banned) 2627 return true; 2628 2629 elapsed = get_seconds() - ctx->hang_stats.guilty_ts; 2630 if (ctx->hang_stats.ban_period_seconds 
&& 2631 elapsed <= ctx->hang_stats.ban_period_seconds) { 2632 DRM_DEBUG("context hanging too fast, banning!\n"); 2633 return true; 2634 } 2635 2636 return false; 2637 } 2638 2639 static void i915_set_reset_status(struct i915_gem_context *ctx, 2640 const bool guilty) 2641 { 2642 struct i915_ctx_hang_stats *hs = &ctx->hang_stats; 2643 2644 if (guilty) { 2645 hs->banned = i915_context_is_banned(ctx); 2646 hs->batch_active++; 2647 hs->guilty_ts = get_seconds(); 2648 } else { 2649 hs->batch_pending++; 2650 } 2651 } 2652 2653 struct drm_i915_gem_request * 2654 i915_gem_find_active_request(struct intel_engine_cs *engine) 2655 { 2656 struct drm_i915_gem_request *request; 2657 2658 /* We are called by the error capture and reset at a random 2659 * point in time. In particular, note that neither is crucially 2660 * ordered with an interrupt. After a hang, the GPU is dead and we 2661 * assume that no more writes can happen (we waited long enough for 2662 * all writes that were in transaction to be flushed) - adding an 2663 * extra delay for a recent interrupt is pointless. Hence, we do 2664 * not need an engine->irq_seqno_barrier() before the seqno reads. 2665 */ 2666 list_for_each_entry(request, &engine->timeline->requests, link) { 2667 if (__i915_gem_request_completed(request)) 2668 continue; 2669 2670 return request; 2671 } 2672 2673 return NULL; 2674 } 2675 2676 static void reset_request(struct drm_i915_gem_request *request) 2677 { 2678 void *vaddr = request->ring->vaddr; 2679 u32 head; 2680 2681 /* As this request likely depends on state from the lost 2682 * context, clear out all the user operations leaving the 2683 * breadcrumb at the end (so we get the fence notifications). 2684 */ 2685 head = request->head; 2686 if (request->postfix < head) { 2687 memset(vaddr + head, 0, request->ring->size - head); 2688 head = 0; 2689 } 2690 memset(vaddr + head, 0, request->postfix - head); 2691 } 2692 2693 static void i915_gem_reset_engine(struct intel_engine_cs *engine) 2694 { 2695 struct drm_i915_gem_request *request; 2696 struct i915_gem_context *incomplete_ctx; 2697 struct intel_timeline *timeline; 2698 bool ring_hung; 2699 2700 if (engine->irq_seqno_barrier) 2701 engine->irq_seqno_barrier(engine); 2702 2703 request = i915_gem_find_active_request(engine); 2704 if (!request) 2705 return; 2706 2707 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; 2708 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) 2709 ring_hung = false; 2710 2711 i915_set_reset_status(request->ctx, ring_hung); 2712 if (!ring_hung) 2713 return; 2714 2715 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", 2716 engine->name, request->global_seqno); 2717 2718 /* Setup the CS to resume from the breadcrumb of the hung request */ 2719 engine->reset_hw(engine, request); 2720 2721 /* Users of the default context do not rely on logical state 2722 * preserved between batches. They have to emit full state on 2723 * every batch and so it is safe to execute queued requests following 2724 * the hang. 2725 * 2726 * Other contexts preserve state, now corrupt. We want to skip all 2727 * queued requests that reference the corrupt context. 
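 * Both the later requests on the engine timeline and those still sitting on the context's own timeline are cleared below, leaving only their breadcrumbs so that waiters are still signalled.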
2728 */ 2729 incomplete_ctx = request->ctx; 2730 if (i915_gem_context_is_default(incomplete_ctx)) 2731 return; 2732 2733 list_for_each_entry_continue(request, &engine->timeline->requests, link) 2734 if (request->ctx == incomplete_ctx) 2735 reset_request(request); 2736 2737 timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine); 2738 list_for_each_entry(request, &timeline->requests, link) 2739 reset_request(request); 2740 } 2741 2742 void i915_gem_reset(struct drm_i915_private *dev_priv) 2743 { 2744 struct intel_engine_cs *engine; 2745 enum intel_engine_id id; 2746 2747 lockdep_assert_held(&dev_priv->drm.struct_mutex); 2748 2749 i915_gem_retire_requests(dev_priv); 2750 2751 for_each_engine(engine, dev_priv, id) 2752 i915_gem_reset_engine(engine); 2753 2754 i915_gem_restore_fences(dev_priv); 2755 2756 if (dev_priv->gt.awake) { 2757 intel_sanitize_gt_powersave(dev_priv); 2758 intel_enable_gt_powersave(dev_priv); 2759 if (INTEL_GEN(dev_priv) >= 6) 2760 gen6_rps_busy(dev_priv); 2761 } 2762 } 2763 2764 static void nop_submit_request(struct drm_i915_gem_request *request) 2765 { 2766 i915_gem_request_submit(request); 2767 intel_engine_init_global_seqno(request->engine, request->global_seqno); 2768 } 2769 2770 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) 2771 { 2772 engine->submit_request = nop_submit_request; 2773 2774 /* Mark all pending requests as complete so that any concurrent 2775 * (lockless) lookup doesn't try and wait upon the request as we 2776 * reset it. 2777 */ 2778 intel_engine_init_global_seqno(engine, 2779 intel_engine_last_submit(engine)); 2780 2781 /* 2782 * Clear the execlists queue up before freeing the requests, as those 2783 * are the ones that keep the context and ringbuffer backing objects 2784 * pinned in place. 2785 */ 2786 2787 if (i915.enable_execlists) { 2788 unsigned long flags; 2789 2790 spin_lock_irqsave(&engine->timeline->lock, flags); 2791 2792 i915_gem_request_put(engine->execlist_port[0].request); 2793 i915_gem_request_put(engine->execlist_port[1].request); 2794 memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); 2795 engine->execlist_queue = RB_ROOT; 2796 engine->execlist_first = NULL; 2797 2798 spin_unlock_irqrestore(&engine->timeline->lock, flags); 2799 } 2800 } 2801 2802 void i915_gem_set_wedged(struct drm_i915_private *dev_priv) 2803 { 2804 struct intel_engine_cs *engine; 2805 enum intel_engine_id id; 2806 2807 lockdep_assert_held(&dev_priv->drm.struct_mutex); 2808 set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); 2809 2810 i915_gem_context_lost(dev_priv); 2811 for_each_engine(engine, dev_priv, id) 2812 i915_gem_cleanup_engine(engine); 2813 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); 2814 2815 i915_gem_retire_requests(dev_priv); 2816 } 2817 2818 static void 2819 i915_gem_retire_work_handler(struct work_struct *work) 2820 { 2821 struct drm_i915_private *dev_priv = 2822 container_of(work, typeof(*dev_priv), gt.retire_work.work); 2823 struct drm_device *dev = &dev_priv->drm; 2824 2825 /* Come back later if the device is busy... */ 2826 if (mutex_trylock(&dev->struct_mutex)) { 2827 i915_gem_retire_requests(dev_priv); 2828 mutex_unlock(&dev->struct_mutex); 2829 } 2830 2831 /* Keep the retire handler running until we are finally idle. 2832 * We do not need to do this test under locking as in the worst-case 2833 * we queue the retire worker once too often. 
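 * The worker simply re-queues itself roughly once per second (rounded up via round_jiffies_up_relative(HZ)) for as long as gt.awake remains set.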
2834 */ 2835 if (READ_ONCE(dev_priv->gt.awake)) { 2836 i915_queue_hangcheck(dev_priv); 2837 queue_delayed_work(dev_priv->wq, 2838 &dev_priv->gt.retire_work, 2839 round_jiffies_up_relative(HZ)); 2840 } 2841 } 2842 2843 static void 2844 i915_gem_idle_work_handler(struct work_struct *work) 2845 { 2846 struct drm_i915_private *dev_priv = 2847 container_of(work, typeof(*dev_priv), gt.idle_work.work); 2848 struct drm_device *dev = &dev_priv->drm; 2849 struct intel_engine_cs *engine; 2850 enum intel_engine_id id; 2851 bool rearm_hangcheck; 2852 2853 if (!READ_ONCE(dev_priv->gt.awake)) 2854 return; 2855 2856 /* 2857 * Wait for last execlists context complete, but bail out in case a 2858 * new request is submitted. 2859 */ 2860 wait_for(READ_ONCE(dev_priv->gt.active_requests) || 2861 intel_execlists_idle(dev_priv), 10); 2862 2863 if (READ_ONCE(dev_priv->gt.active_requests)) 2864 return; 2865 2866 rearm_hangcheck = 2867 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 2868 2869 if (!mutex_trylock(&dev->struct_mutex)) { 2870 /* Currently busy, come back later */ 2871 mod_delayed_work(dev_priv->wq, 2872 &dev_priv->gt.idle_work, 2873 msecs_to_jiffies(50)); 2874 goto out_rearm; 2875 } 2876 2877 /* 2878 * New request retired after this work handler started, extend active 2879 * period until next instance of the work. 2880 */ 2881 if (work_pending(work)) 2882 goto out_unlock; 2883 2884 if (dev_priv->gt.active_requests) 2885 goto out_unlock; 2886 2887 if (wait_for(intel_execlists_idle(dev_priv), 10)) 2888 DRM_ERROR("Timeout waiting for engines to idle\n"); 2889 2890 for_each_engine(engine, dev_priv, id) 2891 i915_gem_batch_pool_fini(&engine->batch_pool); 2892 2893 GEM_BUG_ON(!dev_priv->gt.awake); 2894 dev_priv->gt.awake = false; 2895 rearm_hangcheck = false; 2896 2897 if (INTEL_GEN(dev_priv) >= 6) 2898 gen6_rps_idle(dev_priv); 2899 intel_runtime_pm_put(dev_priv); 2900 out_unlock: 2901 mutex_unlock(&dev->struct_mutex); 2902 2903 out_rearm: 2904 if (rearm_hangcheck) { 2905 GEM_BUG_ON(!dev_priv->gt.awake); 2906 i915_queue_hangcheck(dev_priv); 2907 } 2908 } 2909 2910 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) 2911 { 2912 struct drm_i915_gem_object *obj = to_intel_bo(gem); 2913 struct drm_i915_file_private *fpriv = file->driver_priv; 2914 struct i915_vma *vma, *vn; 2915 2916 mutex_lock(&obj->base.dev->struct_mutex); 2917 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link) 2918 if (vma->vm->file == fpriv) 2919 i915_vma_close(vma); 2920 2921 if (i915_gem_object_is_active(obj) && 2922 !i915_gem_object_has_active_reference(obj)) { 2923 i915_gem_object_set_active_reference(obj); 2924 i915_gem_object_get(obj); 2925 } 2926 mutex_unlock(&obj->base.dev->struct_mutex); 2927 } 2928 2929 static unsigned long to_wait_timeout(s64 timeout_ns) 2930 { 2931 if (timeout_ns < 0) 2932 return MAX_SCHEDULE_TIMEOUT; 2933 2934 if (timeout_ns == 0) 2935 return 0; 2936 2937 return nsecs_to_jiffies_timeout(timeout_ns); 2938 } 2939 2940 /** 2941 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 2942 * @dev: drm device pointer 2943 * @data: ioctl data blob 2944 * @file: drm file pointer 2945 * 2946 * Returns 0 if successful, else an error is returned with the remaining time in 2947 * the timeout parameter. 
2948 * -ETIME: object is still busy after timeout 2949 * -ERESTARTSYS: signal interrupted the wait 2950 * -ENOENT: object doesn't exist 2951 * Also possible, but rare: 2952 * -EAGAIN: GPU wedged 2953 * -ENOMEM: damn 2954 * -ENODEV: Internal IRQ fail 2955 * -E?: The add request failed 2956 * 2957 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 2958 * non-zero timeout parameter the wait ioctl will wait for the given number of 2959 * nanoseconds on an object becoming unbusy. Since the wait itself does so 2960 * without holding struct_mutex the object may become re-busied before this 2961 * function completes. A similar but shorter race condition exists in the busy 2962 * ioctl. 2963 */ 2964 int 2965 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 2966 { 2967 struct drm_i915_gem_wait *args = data; 2968 struct drm_i915_gem_object *obj; 2969 ktime_t start; 2970 long ret; 2971 2972 if (args->flags != 0) 2973 return -EINVAL; 2974 2975 obj = i915_gem_object_lookup(file, args->bo_handle); 2976 if (!obj) 2977 return -ENOENT; 2978 2979 start = ktime_get(); 2980 2981 ret = i915_gem_object_wait(obj, 2982 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, 2983 to_wait_timeout(args->timeout_ns), 2984 to_rps_client(file)); 2985 2986 if (args->timeout_ns > 0) { 2987 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 2988 if (args->timeout_ns < 0) 2989 args->timeout_ns = 0; 2990 } 2991 2992 i915_gem_object_put(obj); 2993 return ret; 2994 } 2995 2996 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) 2997 { 2998 int ret, i; 2999 3000 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3001 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags); 3002 if (ret) 3003 return ret; 3004 } 3005 3006 return 0; 3007 } 3008 3009 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) 3010 { 3011 int ret; 3012 3013 if (flags & I915_WAIT_LOCKED) { 3014 struct i915_gem_timeline *tl; 3015 3016 lockdep_assert_held(&i915->drm.struct_mutex); 3017 3018 list_for_each_entry(tl, &i915->gt.timelines, link) { 3019 ret = wait_for_timeline(tl, flags); 3020 if (ret) 3021 return ret; 3022 } 3023 } else { 3024 ret = wait_for_timeline(&i915->gt.global_timeline, flags); 3025 if (ret) 3026 return ret; 3027 } 3028 3029 return 0; 3030 } 3031 3032 void i915_gem_clflush_object(struct drm_i915_gem_object *obj, 3033 bool force) 3034 { 3035 /* If we don't have a page list set up, then we're not pinned 3036 * to GPU, and we can ignore the cache flush because it'll happen 3037 * again at bind time. 3038 */ 3039 if (!obj->mm.pages) 3040 return; 3041 3042 /* 3043 * Stolen memory is always coherent with the GPU as it is explicitly 3044 * marked as wc by the system, or the system is cache-coherent. 3045 */ 3046 if (obj->stolen || obj->phys_handle) 3047 return; 3048 3049 /* If the GPU is snooping the contents of the CPU cache, 3050 * we do not need to manually clear the CPU cache lines. However, 3051 * the caches are only snooped when the render cache is 3052 * flushed/invalidated. As we always have to emit invalidations 3053 * and flushes when moving into and out of the RENDER domain, correct 3054 * snooping behaviour occurs naturally as the result of our domain 3055 * tracking.
3056 */ 3057 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) { 3058 obj->cache_dirty = true; 3059 return; 3060 } 3061 3062 trace_i915_gem_object_clflush(obj); 3063 drm_clflush_sg(obj->mm.pages); 3064 obj->cache_dirty = false; 3065 } 3066 3067 /** Flushes the GTT write domain for the object if it's dirty. */ 3068 static void 3069 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) 3070 { 3071 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3072 3073 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) 3074 return; 3075 3076 /* No actual flushing is required for the GTT write domain. Writes 3077 * to it "immediately" go to main memory as far as we know, so there's 3078 * no chipset flush. It also doesn't land in render cache. 3079 * 3080 * However, we do have to enforce the order so that all writes through 3081 * the GTT land before any writes to the device, such as updates to 3082 * the GATT itself. 3083 * 3084 * We also have to wait a bit for the writes to land from the GTT. 3085 * An uncached read (i.e. mmio) seems to be ideal for the round-trip 3086 * timing. This issue has only been observed when switching quickly 3087 * between GTT writes and CPU reads from inside the kernel on recent hw, 3088 * and it appears to only affect discrete GTT blocks (i.e. on LLC 3089 * system agents we cannot reproduce this behaviour). 3090 */ 3091 wmb(); 3092 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) 3093 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); 3094 3095 intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT)); 3096 3097 obj->base.write_domain = 0; 3098 trace_i915_gem_object_change_domain(obj, 3099 obj->base.read_domains, 3100 I915_GEM_DOMAIN_GTT); 3101 } 3102 3103 /** Flushes the CPU write domain for the object if it's dirty. */ 3104 static void 3105 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) 3106 { 3107 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 3108 return; 3109 3110 i915_gem_clflush_object(obj, obj->pin_display); 3111 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 3112 3113 obj->base.write_domain = 0; 3114 trace_i915_gem_object_change_domain(obj, 3115 obj->base.read_domains, 3116 I915_GEM_DOMAIN_CPU); 3117 } 3118 3119 /** 3120 * Moves a single object to the GTT read, and possibly write domain. 3121 * @obj: object to act on 3122 * @write: ask for write access or read only 3123 * 3124 * This function returns when the move is complete, including waiting on 3125 * flushes to occur. 3126 */ 3127 int 3128 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3129 { 3130 uint32_t old_write_domain, old_read_domains; 3131 int ret; 3132 3133 lockdep_assert_held(&obj->base.dev->struct_mutex); 3134 3135 ret = i915_gem_object_wait(obj, 3136 I915_WAIT_INTERRUPTIBLE | 3137 I915_WAIT_LOCKED | 3138 (write ? I915_WAIT_ALL : 0), 3139 MAX_SCHEDULE_TIMEOUT, 3140 NULL); 3141 if (ret) 3142 return ret; 3143 3144 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3145 return 0; 3146 3147 /* Flush and acquire obj->pages so that we are coherent through 3148 * direct access in memory with previous cached writes through 3149 * shmemfs and that our cache domain tracking remains valid. 3150 * For example, if the obj->filp was moved to swap without us 3151 * being notified and releasing the pages, we would mistakenly 3152 * continue to assume that the obj remained out of the CPU cached 3153 * domain. 
3154 */ 3155 ret = i915_gem_object_pin_pages(obj); 3156 if (ret) 3157 return ret; 3158 3159 i915_gem_object_flush_cpu_write_domain(obj); 3160 3161 /* Serialise direct access to this object with the barriers for 3162 * coherent writes from the GPU, by effectively invalidating the 3163 * GTT domain upon first access. 3164 */ 3165 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3166 mb(); 3167 3168 old_write_domain = obj->base.write_domain; 3169 old_read_domains = obj->base.read_domains; 3170 3171 /* It should now be out of any other write domains, and we can update 3172 * the domain values for our changes. 3173 */ 3174 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3175 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3176 if (write) { 3177 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3178 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3179 obj->mm.dirty = true; 3180 } 3181 3182 trace_i915_gem_object_change_domain(obj, 3183 old_read_domains, 3184 old_write_domain); 3185 3186 i915_gem_object_unpin_pages(obj); 3187 return 0; 3188 } 3189 3190 /** 3191 * Changes the cache-level of an object across all VMA. 3192 * @obj: object to act on 3193 * @cache_level: new cache level to set for the object 3194 * 3195 * After this function returns, the object will be in the new cache-level 3196 * across all GTT and the contents of the backing storage will be coherent, 3197 * with respect to the new cache-level. In order to keep the backing storage 3198 * coherent for all users, we only allow a single cache level to be set 3199 * globally on the object and prevent it from being changed whilst the 3200 * hardware is reading from the object. That is if the object is currently 3201 * on the scanout it will be set to uncached (or equivalent display 3202 * cache coherency) and all non-MOCS GPU access will also be uncached so 3203 * that all direct access to the scanout remains coherent. 3204 */ 3205 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3206 enum i915_cache_level cache_level) 3207 { 3208 struct i915_vma *vma; 3209 int ret; 3210 3211 lockdep_assert_held(&obj->base.dev->struct_mutex); 3212 3213 if (obj->cache_level == cache_level) 3214 return 0; 3215 3216 /* Inspect the list of currently bound VMA and unbind any that would 3217 * be invalid given the new cache-level. This is principally to 3218 * catch the issue of the CS prefetch crossing page boundaries and 3219 * reading an invalid PTE on older architectures. 3220 */ 3221 restart: 3222 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3223 if (!drm_mm_node_allocated(&vma->node)) 3224 continue; 3225 3226 if (i915_vma_is_pinned(vma)) { 3227 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3228 return -EBUSY; 3229 } 3230 3231 if (i915_gem_valid_gtt_space(vma, cache_level)) 3232 continue; 3233 3234 ret = i915_vma_unbind(vma); 3235 if (ret) 3236 return ret; 3237 3238 /* As unbinding may affect other elements in the 3239 * obj->vma_list (due to side-effects from retiring 3240 * an active vma), play safe and restart the iterator. 3241 */ 3242 goto restart; 3243 } 3244 3245 /* We can reuse the existing drm_mm nodes but need to change the 3246 * cache-level on the PTE. We could simply unbind them all and 3247 * rebind with the correct cache-level on next use. However since 3248 * we already have a valid slot, dma mapping, pages etc, we may as 3249 * rewrite the PTE in the belief that doing so tramples upon less 3250 * state and so involves less work. 
3251 */ 3252 if (obj->bind_count) { 3253 /* Before we change the PTE, the GPU must not be accessing it. 3254 * If we wait upon the object, we know that all the bound 3255 * VMA are no longer active. 3256 */ 3257 ret = i915_gem_object_wait(obj, 3258 I915_WAIT_INTERRUPTIBLE | 3259 I915_WAIT_LOCKED | 3260 I915_WAIT_ALL, 3261 MAX_SCHEDULE_TIMEOUT, 3262 NULL); 3263 if (ret) 3264 return ret; 3265 3266 if (!HAS_LLC(to_i915(obj->base.dev)) && 3267 cache_level != I915_CACHE_NONE) { 3268 /* Access to snoopable pages through the GTT is 3269 * incoherent and on some machines causes a hard 3270 * lockup. Relinquish the CPU mmaping to force 3271 * userspace to refault in the pages and we can 3272 * then double check if the GTT mapping is still 3273 * valid for that pointer access. 3274 */ 3275 i915_gem_release_mmap(obj); 3276 3277 /* As we no longer need a fence for GTT access, 3278 * we can relinquish it now (and so prevent having 3279 * to steal a fence from someone else on the next 3280 * fence request). Note GPU activity would have 3281 * dropped the fence as all snoopable access is 3282 * supposed to be linear. 3283 */ 3284 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3285 ret = i915_vma_put_fence(vma); 3286 if (ret) 3287 return ret; 3288 } 3289 } else { 3290 /* We either have incoherent backing store and 3291 * so no GTT access or the architecture is fully 3292 * coherent. In such cases, existing GTT mmaps 3293 * ignore the cache bit in the PTE and we can 3294 * rewrite it without confusing the GPU or having 3295 * to force userspace to fault back in its mmaps. 3296 */ 3297 } 3298 3299 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3300 if (!drm_mm_node_allocated(&vma->node)) 3301 continue; 3302 3303 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); 3304 if (ret) 3305 return ret; 3306 } 3307 } 3308 3309 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU && 3310 cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) 3311 obj->cache_dirty = true; 3312 3313 list_for_each_entry(vma, &obj->vma_list, obj_link) 3314 vma->node.color = cache_level; 3315 obj->cache_level = cache_level; 3316 3317 return 0; 3318 } 3319 3320 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3321 struct drm_file *file) 3322 { 3323 struct drm_i915_gem_caching *args = data; 3324 struct drm_i915_gem_object *obj; 3325 int err = 0; 3326 3327 rcu_read_lock(); 3328 obj = i915_gem_object_lookup_rcu(file, args->handle); 3329 if (!obj) { 3330 err = -ENOENT; 3331 goto out; 3332 } 3333 3334 switch (obj->cache_level) { 3335 case I915_CACHE_LLC: 3336 case I915_CACHE_L3_LLC: 3337 args->caching = I915_CACHING_CACHED; 3338 break; 3339 3340 case I915_CACHE_WT: 3341 args->caching = I915_CACHING_DISPLAY; 3342 break; 3343 3344 default: 3345 args->caching = I915_CACHING_NONE; 3346 break; 3347 } 3348 out: 3349 rcu_read_unlock(); 3350 return err; 3351 } 3352 3353 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3354 struct drm_file *file) 3355 { 3356 struct drm_i915_private *i915 = to_i915(dev); 3357 struct drm_i915_gem_caching *args = data; 3358 struct drm_i915_gem_object *obj; 3359 enum i915_cache_level level; 3360 int ret; 3361 3362 switch (args->caching) { 3363 case I915_CACHING_NONE: 3364 level = I915_CACHE_NONE; 3365 break; 3366 case I915_CACHING_CACHED: 3367 /* 3368 * Due to a HW issue on BXT A stepping, GPU stores via a 3369 * snooped mapping may leave stale data in a corresponding CPU 3370 * cacheline, whereas normally such cachelines would get 3371 * invalidated. 
3372 */ 3373 if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) 3374 return -ENODEV; 3375 3376 level = I915_CACHE_LLC; 3377 break; 3378 case I915_CACHING_DISPLAY: 3379 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; 3380 break; 3381 default: 3382 return -EINVAL; 3383 } 3384 3385 ret = i915_mutex_lock_interruptible(dev); 3386 if (ret) 3387 return ret; 3388 3389 obj = i915_gem_object_lookup(file, args->handle); 3390 if (!obj) { 3391 ret = -ENOENT; 3392 goto unlock; 3393 } 3394 3395 ret = i915_gem_object_set_cache_level(obj, level); 3396 i915_gem_object_put(obj); 3397 unlock: 3398 mutex_unlock(&dev->struct_mutex); 3399 return ret; 3400 } 3401 3402 /* 3403 * Prepare buffer for display plane (scanout, cursors, etc). 3404 * Can be called from an uninterruptible phase (modesetting) and allows 3405 * any flushes to be pipelined (for pageflips). 3406 */ 3407 struct i915_vma * 3408 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3409 u32 alignment, 3410 const struct i915_ggtt_view *view) 3411 { 3412 struct i915_vma *vma; 3413 u32 old_read_domains, old_write_domain; 3414 int ret; 3415 3416 lockdep_assert_held(&obj->base.dev->struct_mutex); 3417 3418 /* Mark the pin_display early so that we account for the 3419 * display coherency whilst setting up the cache domains. 3420 */ 3421 obj->pin_display++; 3422 3423 /* The display engine is not coherent with the LLC cache on gen6. As 3424 * a result, we make sure that the pinning that is about to occur is 3425 * done with uncached PTEs. This is lowest common denominator for all 3426 * chipsets. 3427 * 3428 * However for gen6+, we could do better by using the GFDT bit instead 3429 * of uncaching, which would allow us to flush all the LLC-cached data 3430 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3431 */ 3432 ret = i915_gem_object_set_cache_level(obj, 3433 HAS_WT(to_i915(obj->base.dev)) ? 3434 I915_CACHE_WT : I915_CACHE_NONE); 3435 if (ret) { 3436 vma = ERR_PTR(ret); 3437 goto err_unpin_display; 3438 } 3439 3440 /* As the user may map the buffer once pinned in the display plane 3441 * (e.g. libkms for the bootup splash), we have to ensure that we 3442 * always use map_and_fenceable for all scanout buffers. However, 3443 * it may simply be too big to fit into mappable, in which case 3444 * put it anyway and hope that userspace can cope (but always first 3445 * try to preserve the existing ABI). 3446 */ 3447 vma = ERR_PTR(-ENOSPC); 3448 if (view->type == I915_GGTT_VIEW_NORMAL) 3449 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 3450 PIN_MAPPABLE | PIN_NONBLOCK); 3451 if (IS_ERR(vma)) { 3452 struct drm_i915_private *i915 = to_i915(obj->base.dev); 3453 unsigned int flags; 3454 3455 /* Valleyview is definitely limited to scanning out the first 3456 * 512MiB. Lets presume this behaviour was inherited from the 3457 * g4x display engine and that all earlier gen are similarly 3458 * limited. Testing suggests that it is a little more 3459 * complicated than this. For example, Cherryview appears quite 3460 * happy to scanout from anywhere within its global aperture. 
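 * Hence the fallback below only insists on PIN_MAPPABLE for GMCH-style display hardware and otherwise allows the VMA to be bound anywhere in the GGTT.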
3461 */ 3462 flags = 0; 3463 if (HAS_GMCH_DISPLAY(i915)) 3464 flags = PIN_MAPPABLE; 3465 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 3466 } 3467 if (IS_ERR(vma)) 3468 goto err_unpin_display; 3469 3470 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 3471 3472 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ 3473 if (obj->cache_dirty) { 3474 i915_gem_clflush_object(obj, true); 3475 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); 3476 } 3477 3478 old_write_domain = obj->base.write_domain; 3479 old_read_domains = obj->base.read_domains; 3480 3481 /* It should now be out of any other write domains, and we can update 3482 * the domain values for our changes. 3483 */ 3484 obj->base.write_domain = 0; 3485 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3486 3487 trace_i915_gem_object_change_domain(obj, 3488 old_read_domains, 3489 old_write_domain); 3490 3491 return vma; 3492 3493 err_unpin_display: 3494 obj->pin_display--; 3495 return vma; 3496 } 3497 3498 void 3499 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) 3500 { 3501 lockdep_assert_held(&vma->vm->dev->struct_mutex); 3502 3503 if (WARN_ON(vma->obj->pin_display == 0)) 3504 return; 3505 3506 if (--vma->obj->pin_display == 0) 3507 vma->display_alignment = 0; 3508 3509 /* Bump the LRU to try and avoid premature eviction whilst flipping */ 3510 if (!i915_vma_is_active(vma)) 3511 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 3512 3513 i915_vma_unpin(vma); 3514 } 3515 3516 /** 3517 * Moves a single object to the CPU read, and possibly write domain. 3518 * @obj: object to act on 3519 * @write: requesting write or read-only access 3520 * 3521 * This function returns when the move is complete, including waiting on 3522 * flushes to occur. 3523 */ 3524 int 3525 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 3526 { 3527 uint32_t old_write_domain, old_read_domains; 3528 int ret; 3529 3530 lockdep_assert_held(&obj->base.dev->struct_mutex); 3531 3532 ret = i915_gem_object_wait(obj, 3533 I915_WAIT_INTERRUPTIBLE | 3534 I915_WAIT_LOCKED | 3535 (write ? I915_WAIT_ALL : 0), 3536 MAX_SCHEDULE_TIMEOUT, 3537 NULL); 3538 if (ret) 3539 return ret; 3540 3541 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 3542 return 0; 3543 3544 i915_gem_object_flush_gtt_write_domain(obj); 3545 3546 old_write_domain = obj->base.write_domain; 3547 old_read_domains = obj->base.read_domains; 3548 3549 /* Flush the CPU cache if it's still invalid. */ 3550 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 3551 i915_gem_clflush_object(obj, false); 3552 3553 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 3554 } 3555 3556 /* It should now be out of any other write domains, and we can update 3557 * the domain values for our changes. 3558 */ 3559 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3560 3561 /* If we're writing through the CPU, then the GPU read domains will 3562 * need to be invalidated at next use. 3563 */ 3564 if (write) { 3565 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3566 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3567 } 3568 3569 trace_i915_gem_object_change_domain(obj, 3570 old_read_domains, 3571 old_write_domain); 3572 3573 return 0; 3574 } 3575 3576 /* Throttle our rendering by waiting until the ring has completed our requests 3577 * emitted over 20 msec ago. 
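 * (DRM_I915_THROTTLE_JIFFIES below corresponds to this 20ms window.)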
3578 * 3579 * Note that if we were to use the current jiffies each time around the loop, 3580 * we wouldn't escape the function with any frames outstanding if the time to 3581 * render a frame was over 20ms. 3582 * 3583 * This should get us reasonable parallelism between CPU and GPU but also 3584 * relatively low latency when blocking on a particular request to finish. 3585 */ 3586 static int 3587 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 3588 { 3589 struct drm_i915_private *dev_priv = to_i915(dev); 3590 struct drm_i915_file_private *file_priv = file->driver_priv; 3591 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; 3592 struct drm_i915_gem_request *request, *target = NULL; 3593 long ret; 3594 3595 /* ABI: return -EIO if already wedged */ 3596 if (i915_terminally_wedged(&dev_priv->gpu_error)) 3597 return -EIO; 3598 3599 spin_lock(&file_priv->mm.lock); 3600 list_for_each_entry(request, &file_priv->mm.request_list, client_list) { 3601 if (time_after_eq(request->emitted_jiffies, recent_enough)) 3602 break; 3603 3604 /* 3605 * Note that the request might not have been submitted yet. 3606 * In which case emitted_jiffies will be zero. 3607 */ 3608 if (!request->emitted_jiffies) 3609 continue; 3610 3611 target = request; 3612 } 3613 if (target) 3614 i915_gem_request_get(target); 3615 spin_unlock(&file_priv->mm.lock); 3616 3617 if (target == NULL) 3618 return 0; 3619 3620 ret = i915_wait_request(target, 3621 I915_WAIT_INTERRUPTIBLE, 3622 MAX_SCHEDULE_TIMEOUT); 3623 i915_gem_request_put(target); 3624 3625 return ret < 0 ? ret : 0; 3626 } 3627 3628 struct i915_vma * 3629 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 3630 const struct i915_ggtt_view *view, 3631 u64 size, 3632 u64 alignment, 3633 u64 flags) 3634 { 3635 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3636 struct i915_address_space *vm = &dev_priv->ggtt.base; 3637 struct i915_vma *vma; 3638 int ret; 3639 3640 lockdep_assert_held(&obj->base.dev->struct_mutex); 3641 3642 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view); 3643 if (IS_ERR(vma)) 3644 return vma; 3645 3646 if (i915_vma_misplaced(vma, size, alignment, flags)) { 3647 if (flags & PIN_NONBLOCK && 3648 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) 3649 return ERR_PTR(-ENOSPC); 3650 3651 if (flags & PIN_MAPPABLE) { 3652 u32 fence_size; 3653 3654 fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size, 3655 i915_gem_object_get_tiling(obj)); 3656 /* If the required space is larger than the available 3657 * aperture, we will not be able to find a slot for the 3658 * object and unbinding the object now will be in 3659 * vain. Worse, doing so may cause us to ping-pong 3660 * the object in and out of the Global GTT and 3661 * waste a lot of cycles under the mutex. 3662 */ 3663 if (fence_size > dev_priv->ggtt.mappable_end) 3664 return ERR_PTR(-E2BIG); 3665 3666 /* If NONBLOCK is set the caller is optimistically 3667 * trying to cache the full object within the mappable 3668 * aperture, and *must* have a fallback in place for 3669 * situations where we cannot bind the object. We 3670 * can be a little more lax here and use the fallback 3671 * more often to avoid costly migrations of ourselves 3672 * and other objects within the aperture. 3673 * 3674 * Half-the-aperture is used as a simple heuristic. 3675 * More interesting would be to search for a free 3676 * block prior to making the commitment to unbind. 3677 * That caters for the self-harm case, and with a 3678 * little more heuristics (e.g.
NOFAULT, NOEVICT) 3679 * we could try to minimise harm to others. 3680 */ 3681 if (flags & PIN_NONBLOCK && 3682 fence_size > dev_priv->ggtt.mappable_end / 2) 3683 return ERR_PTR(-ENOSPC); 3684 } 3685 3686 WARN(i915_vma_is_pinned(vma), 3687 "bo is already pinned in ggtt with incorrect alignment:" 3688 " offset=%08x, req.alignment=%llx," 3689 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", 3690 i915_ggtt_offset(vma), alignment, 3691 !!(flags & PIN_MAPPABLE), 3692 i915_vma_is_map_and_fenceable(vma)); 3693 ret = i915_vma_unbind(vma); 3694 if (ret) 3695 return ERR_PTR(ret); 3696 } 3697 3698 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); 3699 if (ret) 3700 return ERR_PTR(ret); 3701 3702 return vma; 3703 } 3704 3705 static __always_inline unsigned int __busy_read_flag(unsigned int id) 3706 { 3707 /* Note that we could alias engines in the execbuf API, but 3708 * that would be very unwise as it prevents userspace from 3709 * fine control over engine selection. Ahem. 3710 * 3711 * This should be something like EXEC_MAX_ENGINE instead of 3712 * I915_NUM_ENGINES. 3713 */ 3714 BUILD_BUG_ON(I915_NUM_ENGINES > 16); 3715 return 0x10000 << id; 3716 } 3717 3718 static __always_inline unsigned int __busy_write_id(unsigned int id) 3719 { 3720 /* The uABI guarantees an active writer is also amongst the read 3721 * engines. This would be true if we accessed the activity tracking 3722 * under the lock, but as we perform the lookup of the object and 3723 * its activity locklessly we can not guarantee that the last_write 3724 * being active implies that we have set the same engine flag from 3725 * last_read - hence we always set both read and write busy for 3726 * last_write. 3727 */ 3728 return id | __busy_read_flag(id); 3729 } 3730 3731 static __always_inline unsigned int 3732 __busy_set_if_active(const struct dma_fence *fence, 3733 unsigned int (*flag)(unsigned int id)) 3734 { 3735 struct drm_i915_gem_request *rq; 3736 3737 /* We have to check the current hw status of the fence as the uABI 3738 * guarantees forward progress. We could rely on the idle worker 3739 * to eventually flush us, but to minimise latency just ask the 3740 * hardware. 3741 * 3742 * Note we only report on the status of native fences. 3743 */ 3744 if (!dma_fence_is_i915(fence)) 3745 return 0; 3746 3747 /* opencode to_request() in order to avoid const warnings */ 3748 rq = container_of(fence, struct drm_i915_gem_request, fence); 3749 if (i915_gem_request_completed(rq)) 3750 return 0; 3751 3752 return flag(rq->engine->exec_id); 3753 } 3754 3755 static __always_inline unsigned int 3756 busy_check_reader(const struct dma_fence *fence) 3757 { 3758 return __busy_set_if_active(fence, __busy_read_flag); 3759 } 3760 3761 static __always_inline unsigned int 3762 busy_check_writer(const struct dma_fence *fence) 3763 { 3764 if (!fence) 3765 return 0; 3766 3767 return __busy_set_if_active(fence, __busy_write_id); 3768 } 3769 3770 int 3771 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 3772 struct drm_file *file) 3773 { 3774 struct drm_i915_gem_busy *args = data; 3775 struct drm_i915_gem_object *obj; 3776 struct reservation_object_list *list; 3777 unsigned int seq; 3778 int err; 3779 3780 err = -ENOENT; 3781 rcu_read_lock(); 3782 obj = i915_gem_object_lookup_rcu(file, args->handle); 3783 if (!obj) 3784 goto out; 3785 3786 /* A discrepancy here is that we do not report the status of 3787 * non-i915 fences, i.e. 
even though we may report the object as idle, 3788 * a call to set-domain may still stall waiting for foreign rendering. 3789 * This also means that wait-ioctl may report an object as busy, 3790 * where busy-ioctl considers it idle. 3791 * 3792 * We trade the ability to warn of foreign fences to report on which 3793 * i915 engines are active for the object. 3794 * 3795 * Alternatively, we can trade that extra information on read/write 3796 * activity with 3797 * args->busy = 3798 * !reservation_object_test_signaled_rcu(obj->resv, true); 3799 * to report the overall busyness. This is what the wait-ioctl does. 3800 * 3801 */ 3802 retry: 3803 seq = raw_read_seqcount(&obj->resv->seq); 3804 3805 /* Translate the exclusive fence to the READ *and* WRITE engine */ 3806 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); 3807 3808 /* Translate shared fences to READ set of engines */ 3809 list = rcu_dereference(obj->resv->fence); 3810 if (list) { 3811 unsigned int shared_count = list->shared_count, i; 3812 3813 for (i = 0; i < shared_count; ++i) { 3814 struct dma_fence *fence = 3815 rcu_dereference(list->shared[i]); 3816 3817 args->busy |= busy_check_reader(fence); 3818 } 3819 } 3820 3821 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) 3822 goto retry; 3823 3824 err = 0; 3825 out: 3826 rcu_read_unlock(); 3827 return err; 3828 } 3829 3830 int 3831 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 3832 struct drm_file *file_priv) 3833 { 3834 return i915_gem_ring_throttle(dev, file_priv); 3835 } 3836 3837 int 3838 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 3839 struct drm_file *file_priv) 3840 { 3841 struct drm_i915_private *dev_priv = to_i915(dev); 3842 struct drm_i915_gem_madvise *args = data; 3843 struct drm_i915_gem_object *obj; 3844 int err; 3845 3846 switch (args->madv) { 3847 case I915_MADV_DONTNEED: 3848 case I915_MADV_WILLNEED: 3849 break; 3850 default: 3851 return -EINVAL; 3852 } 3853 3854 obj = i915_gem_object_lookup(file_priv, args->handle); 3855 if (!obj) 3856 return -ENOENT; 3857 3858 err = mutex_lock_interruptible(&obj->mm.lock); 3859 if (err) 3860 goto out; 3861 3862 if (obj->mm.pages && 3863 i915_gem_object_is_tiled(obj) && 3864 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 3865 if (obj->mm.madv == I915_MADV_WILLNEED) { 3866 GEM_BUG_ON(!obj->mm.quirked); 3867 __i915_gem_object_unpin_pages(obj); 3868 obj->mm.quirked = false; 3869 } 3870 if (args->madv == I915_MADV_WILLNEED) { 3871 GEM_BUG_ON(obj->mm.quirked); 3872 __i915_gem_object_pin_pages(obj); 3873 obj->mm.quirked = true; 3874 } 3875 } 3876 3877 if (obj->mm.madv != __I915_MADV_PURGED) 3878 obj->mm.madv = args->madv; 3879 3880 /* if the object is no longer attached, discard its backing storage */ 3881 if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages) 3882 i915_gem_object_truncate(obj); 3883 3884 args->retained = obj->mm.madv != __I915_MADV_PURGED; 3885 mutex_unlock(&obj->mm.lock); 3886 3887 out: 3888 i915_gem_object_put(obj); 3889 return err; 3890 } 3891 3892 static void 3893 frontbuffer_retire(struct i915_gem_active *active, 3894 struct drm_i915_gem_request *request) 3895 { 3896 struct drm_i915_gem_object *obj = 3897 container_of(active, typeof(*obj), frontbuffer_write); 3898 3899 intel_fb_obj_flush(obj, true, ORIGIN_CS); 3900 } 3901 3902 void i915_gem_object_init(struct drm_i915_gem_object *obj, 3903 const struct drm_i915_gem_object_ops *ops) 3904 { 3905 mutex_init(&obj->mm.lock); 3906 3907 INIT_LIST_HEAD(&obj->global_link); 3908 
INIT_LIST_HEAD(&obj->userfault_link); 3909 INIT_LIST_HEAD(&obj->obj_exec_link); 3910 INIT_LIST_HEAD(&obj->vma_list); 3911 INIT_LIST_HEAD(&obj->batch_pool_link); 3912 3913 obj->ops = ops; 3914 3915 reservation_object_init(&obj->__builtin_resv); 3916 obj->resv = &obj->__builtin_resv; 3917 3918 obj->frontbuffer_ggtt_origin = ORIGIN_GTT; 3919 init_request_active(&obj->frontbuffer_write, frontbuffer_retire); 3920 3921 obj->mm.madv = I915_MADV_WILLNEED; 3922 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); 3923 mutex_init(&obj->mm.get_page.lock); 3924 3925 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); 3926 } 3927 3928 static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 3929 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 3930 I915_GEM_OBJECT_IS_SHRINKABLE, 3931 .get_pages = i915_gem_object_get_pages_gtt, 3932 .put_pages = i915_gem_object_put_pages_gtt, 3933 }; 3934 3935 /* Note we don't consider signbits :| */ 3936 #define overflows_type(x, T) \ 3937 (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) 3938 3939 struct drm_i915_gem_object * 3940 i915_gem_object_create(struct drm_device *dev, u64 size) 3941 { 3942 struct drm_i915_private *dev_priv = to_i915(dev); 3943 struct drm_i915_gem_object *obj; 3944 struct address_space *mapping; 3945 gfp_t mask; 3946 int ret; 3947 3948 /* There is a prevalence of the assumption that we fit the object's 3949 * page count inside a 32bit _signed_ variable. Let's document this and 3950 * catch if we ever need to fix it. In the meantime, if you do spot 3951 * such a local variable, please consider fixing! 3952 */ 3953 if (WARN_ON(size >> PAGE_SHIFT > INT_MAX)) 3954 return ERR_PTR(-E2BIG); 3955 3956 if (overflows_type(size, obj->base.size)) 3957 return ERR_PTR(-E2BIG); 3958 3959 obj = i915_gem_object_alloc(dev); 3960 if (obj == NULL) 3961 return ERR_PTR(-ENOMEM); 3962 3963 ret = drm_gem_object_init(dev, &obj->base, size); 3964 if (ret) 3965 goto fail; 3966 3967 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 3968 if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) { 3969 /* 965gm cannot relocate objects above 4GiB. */ 3970 mask &= ~__GFP_HIGHMEM; 3971 mask |= __GFP_DMA32; 3972 } 3973 3974 mapping = obj->base.filp->f_mapping; 3975 mapping_set_gfp_mask(mapping, mask); 3976 3977 i915_gem_object_init(obj, &i915_gem_object_ops); 3978 3979 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3980 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3981 3982 if (HAS_LLC(dev_priv)) { 3983 /* On some devices, we can have the GPU use the LLC (the CPU 3984 * cache) for about a 10% performance improvement 3985 * compared to uncached. Graphics requests other than 3986 * display scanout are coherent with the CPU in 3987 * accessing this cache. This means in this mode we 3988 * don't need to clflush on the CPU side, and on the 3989 * GPU side we only need to flush internal caches to 3990 * get data visible to the CPU. 3991 * 3992 * However, we maintain the display planes as UC, and so 3993 * need to rebind when first used as such. 3994 */ 3995 obj->cache_level = I915_CACHE_LLC; 3996 } else 3997 obj->cache_level = I915_CACHE_NONE; 3998 3999 trace_i915_gem_object_create(obj); 4000 4001 return obj; 4002 4003 fail: 4004 i915_gem_object_free(obj); 4005 return ERR_PTR(ret); 4006 } 4007 4008 static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4009 { 4010 /* If we are the last user of the backing storage (be it shmemfs 4011 * pages or stolen etc), we know that the pages are going to be 4012 * immediately released. 
In this case, we can then skip copying 4013 * back the contents from the GPU. 4014 */ 4015 4016 if (obj->mm.madv != I915_MADV_WILLNEED) 4017 return false; 4018 4019 if (obj->base.filp == NULL) 4020 return true; 4021 4022 /* At first glance, this looks racy, but then again so would be 4023 * userspace racing mmap against close. However, the first external 4024 * reference to the filp can only be obtained through the 4025 * i915_gem_mmap_ioctl() which safeguards us against the user 4026 * acquiring such a reference whilst we are in the middle of 4027 * freeing the object. 4028 */ 4029 return atomic_long_read(&obj->base.filp->f_count) == 1; 4030 } 4031 4032 static void __i915_gem_free_objects(struct drm_i915_private *i915, 4033 struct llist_node *freed) 4034 { 4035 struct drm_i915_gem_object *obj, *on; 4036 4037 mutex_lock(&i915->drm.struct_mutex); 4038 intel_runtime_pm_get(i915); 4039 llist_for_each_entry(obj, freed, freed) { 4040 struct i915_vma *vma, *vn; 4041 4042 trace_i915_gem_object_destroy(obj); 4043 4044 GEM_BUG_ON(i915_gem_object_is_active(obj)); 4045 list_for_each_entry_safe(vma, vn, 4046 &obj->vma_list, obj_link) { 4047 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 4048 GEM_BUG_ON(i915_vma_is_active(vma)); 4049 vma->flags &= ~I915_VMA_PIN_MASK; 4050 i915_vma_close(vma); 4051 } 4052 GEM_BUG_ON(!list_empty(&obj->vma_list)); 4053 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree)); 4054 4055 list_del(&obj->global_link); 4056 } 4057 intel_runtime_pm_put(i915); 4058 mutex_unlock(&i915->drm.struct_mutex); 4059 4060 llist_for_each_entry_safe(obj, on, freed, freed) { 4061 GEM_BUG_ON(obj->bind_count); 4062 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); 4063 4064 if (obj->ops->release) 4065 obj->ops->release(obj); 4066 4067 if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) 4068 atomic_set(&obj->mm.pages_pin_count, 0); 4069 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 4070 GEM_BUG_ON(obj->mm.pages); 4071 4072 if (obj->base.import_attach) 4073 drm_prime_gem_destroy(&obj->base, NULL); 4074 4075 reservation_object_fini(&obj->__builtin_resv); 4076 drm_gem_object_release(&obj->base); 4077 i915_gem_info_remove_obj(i915, obj->base.size); 4078 4079 kfree(obj->bit_17); 4080 i915_gem_object_free(obj); 4081 } 4082 } 4083 4084 static void i915_gem_flush_free_objects(struct drm_i915_private *i915) 4085 { 4086 struct llist_node *freed; 4087 4088 freed = llist_del_all(&i915->mm.free_list); 4089 if (unlikely(freed)) 4090 __i915_gem_free_objects(i915, freed); 4091 } 4092 4093 static void __i915_gem_free_work(struct work_struct *work) 4094 { 4095 struct drm_i915_private *i915 = 4096 container_of(work, struct drm_i915_private, mm.free_work); 4097 struct llist_node *freed; 4098 4099 /* All file-owned VMA should have been released by this point through 4100 * i915_gem_close_object(), or earlier by i915_gem_context_close(). 4101 * However, the object may also be bound into the global GTT (e.g. 4102 * older GPUs without per-process support, or for direct access through 4103 * the GTT either for the user or for scanout). Those VMA still need to 4104 * be unbound now.
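* The actual unbinding is done by __i915_gem_free_objects() above, which takes struct_mutex and a runtime PM wakeref before closing each remaining GGTT VMA.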
4105 */ 4106 4107 while ((freed = llist_del_all(&i915->mm.free_list))) 4108 __i915_gem_free_objects(i915, freed); 4109 } 4110 4111 static void __i915_gem_free_object_rcu(struct rcu_head *head) 4112 { 4113 struct drm_i915_gem_object *obj = 4114 container_of(head, typeof(*obj), rcu); 4115 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4116 4117 /* We can't simply use call_rcu() from i915_gem_free_object() 4118 * as we need to block whilst unbinding, and the call_rcu 4119 * task may be called from softirq context. So we take a 4120 * detour through a worker. 4121 */ 4122 if (llist_add(&obj->freed, &i915->mm.free_list)) 4123 schedule_work(&i915->mm.free_work); 4124 } 4125 4126 void i915_gem_free_object(struct drm_gem_object *gem_obj) 4127 { 4128 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4129 4130 if (obj->mm.quirked) 4131 __i915_gem_object_unpin_pages(obj); 4132 4133 if (discard_backing_storage(obj)) 4134 obj->mm.madv = I915_MADV_DONTNEED; 4135 4136 /* Before we free the object, make sure any pure RCU-only 4137 * read-side critical sections are complete, e.g. 4138 * i915_gem_busy_ioctl(). For the corresponding synchronized 4139 * lookup see i915_gem_object_lookup_rcu(). 4140 */ 4141 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); 4142 } 4143 4144 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) 4145 { 4146 lockdep_assert_held(&obj->base.dev->struct_mutex); 4147 4148 GEM_BUG_ON(i915_gem_object_has_active_reference(obj)); 4149 if (i915_gem_object_is_active(obj)) 4150 i915_gem_object_set_active_reference(obj); 4151 else 4152 i915_gem_object_put(obj); 4153 } 4154 4155 static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) 4156 { 4157 struct intel_engine_cs *engine; 4158 enum intel_engine_id id; 4159 4160 for_each_engine(engine, dev_priv, id) 4161 GEM_BUG_ON(engine->last_context != dev_priv->kernel_context); 4162 } 4163 4164 int i915_gem_suspend(struct drm_device *dev) 4165 { 4166 struct drm_i915_private *dev_priv = to_i915(dev); 4167 int ret; 4168 4169 intel_suspend_gt_powersave(dev_priv); 4170 4171 mutex_lock(&dev->struct_mutex); 4172 4173 /* We have to flush all the executing contexts to main memory so 4174 * that they can be saved in the hibernation image. To ensure the last 4175 * context image is coherent, we have to switch away from it. That 4176 * leaves the dev_priv->kernel_context still active when 4177 * we actually suspend, and its image in memory may not match the GPU 4178 * state. Fortunately, the kernel_context is disposable and we do 4179 * not rely on its state. 4180 */ 4181 ret = i915_gem_switch_to_kernel_context(dev_priv); 4182 if (ret) 4183 goto err; 4184 4185 ret = i915_gem_wait_for_idle(dev_priv, 4186 I915_WAIT_INTERRUPTIBLE | 4187 I915_WAIT_LOCKED); 4188 if (ret) 4189 goto err; 4190 4191 i915_gem_retire_requests(dev_priv); 4192 GEM_BUG_ON(dev_priv->gt.active_requests); 4193 4194 assert_kernel_context_is_current(dev_priv); 4195 i915_gem_context_lost(dev_priv); 4196 mutex_unlock(&dev->struct_mutex); 4197 4198 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4199 cancel_delayed_work_sync(&dev_priv->gt.retire_work); 4200 flush_delayed_work(&dev_priv->gt.idle_work); 4201 flush_work(&dev_priv->mm.free_work); 4202 4203 /* Assert that we successfully flushed all the work and 4204 * reset the GPU back to its idle, low power state.
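* In particular, flushing mm.free_work above ensures that any objects queued for freeing have been fully released before we suspend.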
4205 */ 4206 WARN_ON(dev_priv->gt.awake); 4207 WARN_ON(!intel_execlists_idle(dev_priv)); 4208 4209 /* 4210 * Neither the BIOS, ourselves nor any other kernel 4211 * expects the system to be in execlists mode on startup, 4212 * so we need to reset the GPU back to legacy mode. And the only 4213 * known way to disable logical contexts is through a GPU reset. 4214 * 4215 * So in order to leave the system in a known default configuration, 4216 * always reset the GPU upon unload and suspend. Afterwards we then 4217 * clean up the GEM state tracking, flushing off the requests and 4218 * leaving the system in a known idle state. 4219 * 4220 * Note that it is of the utmost importance that the GPU is idle and 4221 * all stray writes are flushed *before* we dismantle the backing 4222 * storage for the pinned objects. 4223 * 4224 * However, since we are uncertain that resetting the GPU on older 4225 * machines is a good idea, we don't - just in case it leaves the 4226 * machine in an unusable condition. 4227 */ 4228 if (HAS_HW_CONTEXTS(dev_priv)) { 4229 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); 4230 WARN_ON(reset && reset != -ENODEV); 4231 } 4232 4233 return 0; 4234 4235 err: 4236 mutex_unlock(&dev->struct_mutex); 4237 return ret; 4238 } 4239 4240 void i915_gem_resume(struct drm_device *dev) 4241 { 4242 struct drm_i915_private *dev_priv = to_i915(dev); 4243 4244 WARN_ON(dev_priv->gt.awake); 4245 4246 mutex_lock(&dev->struct_mutex); 4247 i915_gem_restore_gtt_mappings(dev_priv); 4248 4249 /* As we didn't flush the kernel context before suspend, we cannot 4250 * guarantee that the context image is complete. So let's just reset 4251 * it and start again. 4252 */ 4253 dev_priv->gt.resume(dev_priv); 4254 4255 mutex_unlock(&dev->struct_mutex); 4256 } 4257 4258 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) 4259 { 4260 if (INTEL_GEN(dev_priv) < 5 || 4261 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4262 return; 4263 4264 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 4265 DISP_TILE_SURFACE_SWIZZLING); 4266 4267 if (IS_GEN5(dev_priv)) 4268 return; 4269 4270 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 4271 if (IS_GEN6(dev_priv)) 4272 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4273 else if (IS_GEN7(dev_priv)) 4274 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4275 else if (IS_GEN8(dev_priv)) 4276 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 4277 else 4278 BUG(); 4279 } 4280 4281 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) 4282 { 4283 I915_WRITE(RING_CTL(base), 0); 4284 I915_WRITE(RING_HEAD(base), 0); 4285 I915_WRITE(RING_TAIL(base), 0); 4286 I915_WRITE(RING_START(base), 0); 4287 } 4288 4289 static void init_unused_rings(struct drm_i915_private *dev_priv) 4290 { 4291 if (IS_I830(dev_priv)) { 4292 init_unused_ring(dev_priv, PRB1_BASE); 4293 init_unused_ring(dev_priv, SRB0_BASE); 4294 init_unused_ring(dev_priv, SRB1_BASE); 4295 init_unused_ring(dev_priv, SRB2_BASE); 4296 init_unused_ring(dev_priv, SRB3_BASE); 4297 } else if (IS_GEN2(dev_priv)) { 4298 init_unused_ring(dev_priv, SRB0_BASE); 4299 init_unused_ring(dev_priv, SRB1_BASE); 4300 } else if (IS_GEN3(dev_priv)) { 4301 init_unused_ring(dev_priv, PRB1_BASE); 4302 init_unused_ring(dev_priv, PRB2_BASE); 4303 } 4304 } 4305 4306 int 4307 i915_gem_init_hw(struct drm_device *dev) 4308 { 4309 struct drm_i915_private *dev_priv = to_i915(dev); 4310 struct intel_engine_cs *engine; 4311 enum intel_engine_id id; 4312 int ret; 4313
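/* Record when the hardware was last (re)initialised */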
4314 dev_priv->gt.last_init_time = ktime_get(); 4315 4316 /* Double layer security blanket, see i915_gem_init() */ 4317 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4318 4319 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) 4320 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4321 4322 if (IS_HASWELL(dev_priv)) 4323 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? 4324 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 4325 4326 if (HAS_PCH_NOP(dev_priv)) { 4327 if (IS_IVYBRIDGE(dev_priv)) { 4328 u32 temp = I915_READ(GEN7_MSG_CTL); 4329 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4330 I915_WRITE(GEN7_MSG_CTL, temp); 4331 } else if (INTEL_GEN(dev_priv) >= 7) { 4332 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); 4333 temp &= ~RESET_PCH_HANDSHAKE_ENABLE; 4334 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); 4335 } 4336 } 4337 4338 i915_gem_init_swizzling(dev_priv); 4339 4340 /* 4341 * At least 830 can leave some of the unused rings 4342 * "active" (ie. head != tail) after resume which 4343 * will prevent c3 entry. Make sure all unused rings 4344 * are totally idle. 4345 */ 4346 init_unused_rings(dev_priv); 4347 4348 BUG_ON(!dev_priv->kernel_context); 4349 4350 ret = i915_ppgtt_init_hw(dev_priv); 4351 if (ret) { 4352 DRM_ERROR("PPGTT enable HW failed %d\n", ret); 4353 goto out; 4354 } 4355 4356 /* Need to do basic initialisation of all rings first: */ 4357 for_each_engine(engine, dev_priv, id) { 4358 ret = engine->init_hw(engine); 4359 if (ret) 4360 goto out; 4361 } 4362 4363 intel_mocs_init_l3cc_table(dev); 4364 4365 /* We can't enable contexts until all firmware is loaded */ 4366 ret = intel_guc_setup(dev); 4367 if (ret) 4368 goto out; 4369 4370 out: 4371 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4372 return ret; 4373 } 4374 4375 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) 4376 { 4377 if (INTEL_INFO(dev_priv)->gen < 6) 4378 return false; 4379 4380 /* TODO: make semaphores and Execlists play nicely together */ 4381 if (i915.enable_execlists) 4382 return false; 4383 4384 if (value >= 0) 4385 return value; 4386 4387 #ifdef CONFIG_INTEL_IOMMU 4388 /* Enable semaphores on SNB when IO remapping is off */ 4389 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped) 4390 return false; 4391 #endif 4392 4393 return true; 4394 } 4395 4396 int i915_gem_init(struct drm_device *dev) 4397 { 4398 struct drm_i915_private *dev_priv = to_i915(dev); 4399 int ret; 4400 4401 mutex_lock(&dev->struct_mutex); 4402 4403 if (!i915.enable_execlists) { 4404 dev_priv->gt.resume = intel_legacy_submission_resume; 4405 dev_priv->gt.cleanup_engine = intel_engine_cleanup; 4406 } else { 4407 dev_priv->gt.resume = intel_lr_context_resume; 4408 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; 4409 } 4410 4411 /* This is just a security blanket to placate dragons. 4412 * On some systems, we very sporadically observe that the first TLBs 4413 * used by the CS may be stale, despite us poking the TLB reset. If 4414 * we hold the forcewake during initialisation these problems 4415 * just magically go away.
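* The corresponding forcewake reference is dropped again at out_unlock below.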
4416 */ 4417 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4418 4419 i915_gem_init_userptr(dev_priv); 4420 4421 ret = i915_gem_init_ggtt(dev_priv); 4422 if (ret) 4423 goto out_unlock; 4424 4425 ret = i915_gem_context_init(dev); 4426 if (ret) 4427 goto out_unlock; 4428 4429 ret = intel_engines_init(dev); 4430 if (ret) 4431 goto out_unlock; 4432 4433 ret = i915_gem_init_hw(dev); 4434 if (ret == -EIO) { 4435 /* Allow engine initialisation to fail by marking the GPU as 4436 * wedged. But we only want to do this where the GPU is angry, 4437 * for all other failure, such as an allocation failure, bail. 4438 */ 4439 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 4440 i915_gem_set_wedged(dev_priv); 4441 ret = 0; 4442 } 4443 4444 out_unlock: 4445 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4446 mutex_unlock(&dev->struct_mutex); 4447 4448 return ret; 4449 } 4450 4451 void 4452 i915_gem_cleanup_engines(struct drm_device *dev) 4453 { 4454 struct drm_i915_private *dev_priv = to_i915(dev); 4455 struct intel_engine_cs *engine; 4456 enum intel_engine_id id; 4457 4458 for_each_engine(engine, dev_priv, id) 4459 dev_priv->gt.cleanup_engine(engine); 4460 } 4461 4462 void 4463 i915_gem_load_init_fences(struct drm_i915_private *dev_priv) 4464 { 4465 int i; 4466 4467 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 4468 !IS_CHERRYVIEW(dev_priv)) 4469 dev_priv->num_fence_regs = 32; 4470 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) || 4471 IS_I945GM(dev_priv) || IS_G33(dev_priv)) 4472 dev_priv->num_fence_regs = 16; 4473 else 4474 dev_priv->num_fence_regs = 8; 4475 4476 if (intel_vgpu_active(dev_priv)) 4477 dev_priv->num_fence_regs = 4478 I915_READ(vgtif_reg(avail_rs.fence_num)); 4479 4480 /* Initialize fence registers to zero */ 4481 for (i = 0; i < dev_priv->num_fence_regs; i++) { 4482 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; 4483 4484 fence->i915 = dev_priv; 4485 fence->id = i; 4486 list_add_tail(&fence->link, &dev_priv->mm.fence_list); 4487 } 4488 i915_gem_restore_fences(dev_priv); 4489 4490 i915_gem_detect_bit_6_swizzle(dev_priv); 4491 } 4492 4493 int 4494 i915_gem_load_init(struct drm_device *dev) 4495 { 4496 struct drm_i915_private *dev_priv = to_i915(dev); 4497 int err = -ENOMEM; 4498 4499 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); 4500 if (!dev_priv->objects) 4501 goto err_out; 4502 4503 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 4504 if (!dev_priv->vmas) 4505 goto err_objects; 4506 4507 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, 4508 SLAB_HWCACHE_ALIGN | 4509 SLAB_RECLAIM_ACCOUNT | 4510 SLAB_DESTROY_BY_RCU); 4511 if (!dev_priv->requests) 4512 goto err_vmas; 4513 4514 dev_priv->dependencies = KMEM_CACHE(i915_dependency, 4515 SLAB_HWCACHE_ALIGN | 4516 SLAB_RECLAIM_ACCOUNT); 4517 if (!dev_priv->dependencies) 4518 goto err_requests; 4519 4520 mutex_lock(&dev_priv->drm.struct_mutex); 4521 INIT_LIST_HEAD(&dev_priv->gt.timelines); 4522 err = i915_gem_timeline_init__global(dev_priv); 4523 mutex_unlock(&dev_priv->drm.struct_mutex); 4524 if (err) 4525 goto err_dependencies; 4526 4527 INIT_LIST_HEAD(&dev_priv->context_list); 4528 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); 4529 init_llist_head(&dev_priv->mm.free_list); 4530 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4531 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4532 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4533 INIT_LIST_HEAD(&dev_priv->mm.userfault_list); 4534 INIT_DELAYED_WORK(&dev_priv->gt.retire_work, 4535 
i915_gem_retire_work_handler); 4536 INIT_DELAYED_WORK(&dev_priv->gt.idle_work, 4537 i915_gem_idle_work_handler); 4538 init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 4539 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4540 4541 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 4542 4543 init_waitqueue_head(&dev_priv->pending_flip_queue); 4544 4545 dev_priv->mm.interruptible = true; 4546 4547 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); 4548 4549 spin_lock_init(&dev_priv->fb_tracking.lock); 4550 4551 return 0; 4552 4553 err_dependencies: 4554 kmem_cache_destroy(dev_priv->dependencies); 4555 err_requests: 4556 kmem_cache_destroy(dev_priv->requests); 4557 err_vmas: 4558 kmem_cache_destroy(dev_priv->vmas); 4559 err_objects: 4560 kmem_cache_destroy(dev_priv->objects); 4561 err_out: 4562 return err; 4563 } 4564 4565 void i915_gem_load_cleanup(struct drm_device *dev) 4566 { 4567 struct drm_i915_private *dev_priv = to_i915(dev); 4568 4569 WARN_ON(!llist_empty(&dev_priv->mm.free_list)); 4570 4571 mutex_lock(&dev_priv->drm.struct_mutex); 4572 i915_gem_timeline_fini(&dev_priv->gt.global_timeline); 4573 WARN_ON(!list_empty(&dev_priv->gt.timelines)); 4574 mutex_unlock(&dev_priv->drm.struct_mutex); 4575 4576 kmem_cache_destroy(dev_priv->dependencies); 4577 kmem_cache_destroy(dev_priv->requests); 4578 kmem_cache_destroy(dev_priv->vmas); 4579 kmem_cache_destroy(dev_priv->objects); 4580 4581 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ 4582 rcu_barrier(); 4583 } 4584 4585 int i915_gem_freeze(struct drm_i915_private *dev_priv) 4586 { 4587 intel_runtime_pm_get(dev_priv); 4588 4589 mutex_lock(&dev_priv->drm.struct_mutex); 4590 i915_gem_shrink_all(dev_priv); 4591 mutex_unlock(&dev_priv->drm.struct_mutex); 4592 4593 intel_runtime_pm_put(dev_priv); 4594 4595 return 0; 4596 } 4597 4598 int i915_gem_freeze_late(struct drm_i915_private *dev_priv) 4599 { 4600 struct drm_i915_gem_object *obj; 4601 struct list_head *phases[] = { 4602 &dev_priv->mm.unbound_list, 4603 &dev_priv->mm.bound_list, 4604 NULL 4605 }, **p; 4606 4607 /* Called just before we write the hibernation image. 4608 * 4609 * We need to update the domain tracking to reflect that the CPU 4610 * will be accessing all the pages to create and restore from the 4611 * hibernation, and so upon restoration those pages will be in the 4612 * CPU domain. 4613 * 4614 * To make sure the hibernation image contains the latest state, 4615 * we update that state just before writing out the image. 4616 * 4617 * To try and reduce the hibernation image, we manually shrink 4618 * the objects as well. 4619 */ 4620 4621 mutex_lock(&dev_priv->drm.struct_mutex); 4622 i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND); 4623 4624 for (p = phases; *p; p++) { 4625 list_for_each_entry(obj, *p, global_link) { 4626 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4627 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4628 } 4629 } 4630 mutex_unlock(&dev_priv->drm.struct_mutex); 4631 4632 return 0; 4633 } 4634 4635 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 4636 { 4637 struct drm_i915_file_private *file_priv = file->driver_priv; 4638 struct drm_i915_gem_request *request; 4639 4640 /* Clean up our request list when the client is going away, so that 4641 * later retire_requests won't dereference our soon-to-be-gone 4642 * file_priv. 
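* Note that we only clear the request->file_priv back-pointers here; the requests themselves are retired as normal.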
4643 */ 4644 spin_lock(&file_priv->mm.lock); 4645 list_for_each_entry(request, &file_priv->mm.request_list, client_list) 4646 request->file_priv = NULL; 4647 spin_unlock(&file_priv->mm.lock); 4648 4649 if (!list_empty(&file_priv->rps.link)) { 4650 spin_lock(&to_i915(dev)->rps.client_lock); 4651 list_del(&file_priv->rps.link); 4652 spin_unlock(&to_i915(dev)->rps.client_lock); 4653 } 4654 } 4655 4656 int i915_gem_open(struct drm_device *dev, struct drm_file *file) 4657 { 4658 struct drm_i915_file_private *file_priv; 4659 int ret; 4660 4661 DRM_DEBUG("\n"); 4662 4663 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 4664 if (!file_priv) 4665 return -ENOMEM; 4666 4667 file->driver_priv = file_priv; 4668 file_priv->dev_priv = to_i915(dev); 4669 file_priv->file = file; 4670 INIT_LIST_HEAD(&file_priv->rps.link); 4671 4672 spin_lock_init(&file_priv->mm.lock); 4673 INIT_LIST_HEAD(&file_priv->mm.request_list); 4674 4675 file_priv->bsd_engine = -1; 4676 4677 ret = i915_gem_context_open(dev, file); 4678 if (ret) 4679 kfree(file_priv); 4680 4681 return ret; 4682 } 4683 4684 /** 4685 * i915_gem_track_fb - update frontbuffer tracking 4686 * @old: current GEM buffer for the frontbuffer slots 4687 * @new: new GEM buffer for the frontbuffer slots 4688 * @frontbuffer_bits: bitmask of frontbuffer slots 4689 * 4690 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them 4691 * from @old and setting them in @new. Both @old and @new can be NULL. 4692 */ 4693 void i915_gem_track_fb(struct drm_i915_gem_object *old, 4694 struct drm_i915_gem_object *new, 4695 unsigned frontbuffer_bits) 4696 { 4697 /* Control of individual bits within the mask is guarded by 4698 * the owning plane->mutex, i.e. we can never see concurrent 4699 * manipulation of individual bits. But since the bitfield as a whole 4700 * is updated using RMW, we need to use atomics in order to update 4701 * the bits.
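* Hence the atomic_andnot()/atomic_or() pair below rather than a plain read-modify-write of the bitfield.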
4702 */ 4703 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 4704 sizeof(atomic_t) * BITS_PER_BYTE); 4705 4706 if (old) { 4707 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); 4708 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); 4709 } 4710 4711 if (new) { 4712 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); 4713 atomic_or(frontbuffer_bits, &new->frontbuffer_bits); 4714 } 4715 } 4716 4717 /* Allocate a new GEM object and fill it with the supplied data */ 4718 struct drm_i915_gem_object * 4719 i915_gem_object_create_from_data(struct drm_device *dev, 4720 const void *data, size_t size) 4721 { 4722 struct drm_i915_gem_object *obj; 4723 struct sg_table *sg; 4724 size_t bytes; 4725 int ret; 4726 4727 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE)); 4728 if (IS_ERR(obj)) 4729 return obj; 4730 4731 ret = i915_gem_object_set_to_cpu_domain(obj, true); 4732 if (ret) 4733 goto fail; 4734 4735 ret = i915_gem_object_pin_pages(obj); 4736 if (ret) 4737 goto fail; 4738 4739 sg = obj->mm.pages; 4740 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size); 4741 obj->mm.dirty = true; /* Backing store is now out of date */ 4742 i915_gem_object_unpin_pages(obj); 4743 4744 if (WARN_ON(bytes != size)) { 4745 DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size); 4746 ret = -EFAULT; 4747 goto fail; 4748 } 4749 4750 return obj; 4751 4752 fail: 4753 i915_gem_object_put(obj); 4754 return ERR_PTR(ret); 4755 } 4756 4757 struct scatterlist * 4758 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 4759 unsigned int n, 4760 unsigned int *offset) 4761 { 4762 struct i915_gem_object_page_iter *iter = &obj->mm.get_page; 4763 struct scatterlist *sg; 4764 unsigned int idx, count; 4765 4766 might_sleep(); 4767 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); 4768 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 4769 4770 /* As we iterate forward through the sg, we record each entry in a 4771 * radixtree for quick repeated (backwards) lookups. If we have seen 4772 * this index previously, we will have an entry for it. 4773 * 4774 * Initial lookup is O(N), but this is amortized to O(1) for 4775 * sequential page access (where each new request is consecutive 4776 * to the previous one). Repeated lookups are O(lg(obj->base.size)), 4777 * i.e. O(1) with a large constant! 4778 */ 4779 if (n < READ_ONCE(iter->sg_idx)) 4780 goto lookup; 4781 4782 mutex_lock(&iter->lock); 4783 4784 /* We prefer to reuse the last sg so that repeated lookup of this 4785 * (or the subsequent) sg are fast - comparing against the last 4786 * sg is faster than going through the radixtree. 4787 */ 4788 4789 sg = iter->sg_pos; 4790 idx = iter->sg_idx; 4791 count = __sg_page_count(sg); 4792 4793 while (idx + count <= n) { 4794 unsigned long exception, i; 4795 int ret; 4796 4797 /* If we cannot allocate and insert this entry, or the 4798 * individual pages from this range, cancel updating the 4799 * sg_idx so that on this lookup we are forced to linearly 4800 * scan onwards, but on future lookups we will try the 4801 * insertion again (in which case we need to be careful of 4802 * the error return reporting that we have already inserted 4803 * this index). 
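* (In practice this means an -EEXIST from radix_tree_insert() below is treated as success rather than an error.)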
4804 */ 4805 ret = radix_tree_insert(&iter->radix, idx, sg); 4806 if (ret && ret != -EEXIST) 4807 goto scan; 4808 4809 exception = 4810 RADIX_TREE_EXCEPTIONAL_ENTRY | 4811 idx << RADIX_TREE_EXCEPTIONAL_SHIFT; 4812 for (i = 1; i < count; i++) { 4813 ret = radix_tree_insert(&iter->radix, idx + i, 4814 (void *)exception); 4815 if (ret && ret != -EEXIST) 4816 goto scan; 4817 } 4818 4819 idx += count; 4820 sg = ____sg_next(sg); 4821 count = __sg_page_count(sg); 4822 } 4823 4824 scan: 4825 iter->sg_pos = sg; 4826 iter->sg_idx = idx; 4827 4828 mutex_unlock(&iter->lock); 4829 4830 if (unlikely(n < idx)) /* insertion completed by another thread */ 4831 goto lookup; 4832 4833 /* In case we failed to insert the entry into the radixtree, we need 4834 * to look beyond the current sg. 4835 */ 4836 while (idx + count <= n) { 4837 idx += count; 4838 sg = ____sg_next(sg); 4839 count = __sg_page_count(sg); 4840 } 4841 4842 *offset = n - idx; 4843 return sg; 4844 4845 lookup: 4846 rcu_read_lock(); 4847 4848 sg = radix_tree_lookup(&iter->radix, n); 4849 GEM_BUG_ON(!sg); 4850 4851 /* If this index is in the middle of multi-page sg entry, 4852 * the radixtree will contain an exceptional entry that points 4853 * to the start of that range. We will return the pointer to 4854 * the base page and the offset of this page within the 4855 * sg entry's range. 4856 */ 4857 *offset = 0; 4858 if (unlikely(radix_tree_exception(sg))) { 4859 unsigned long base = 4860 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT; 4861 4862 sg = radix_tree_lookup(&iter->radix, base); 4863 GEM_BUG_ON(!sg); 4864 4865 *offset = n - base; 4866 } 4867 4868 rcu_read_unlock(); 4869 4870 return sg; 4871 } 4872 4873 struct page * 4874 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) 4875 { 4876 struct scatterlist *sg; 4877 unsigned int offset; 4878 4879 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 4880 4881 sg = i915_gem_object_get_sg(obj, n, &offset); 4882 return nth_page(sg_page(sg), offset); 4883 } 4884 4885 /* Like i915_gem_object_get_page(), but mark the returned page dirty */ 4886 struct page * 4887 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 4888 unsigned int n) 4889 { 4890 struct page *page; 4891 4892 page = i915_gem_object_get_page(obj, n); 4893 if (!obj->mm.dirty) 4894 set_page_dirty(page); 4895 4896 return page; 4897 } 4898 4899 dma_addr_t 4900 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 4901 unsigned long n) 4902 { 4903 struct scatterlist *sg; 4904 unsigned int offset; 4905 4906 sg = i915_gem_object_get_sg(obj, n, &offset); 4907 return sg_dma_address(sg) + (offset << PAGE_SHIFT); 4908 } 4909
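/*
 * Illustrative sketch (not part of the driver): one way a caller might walk
 * an object's backing pages using the lookup helpers above. It assumes the
 * pages have already been pinned (see the pinned-pages assertion in
 * i915_gem_object_get_sg()); the loop body is hypothetical.
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
 *		struct page *page = i915_gem_object_get_page(obj, i);
 *
 *		... operate on page ...
 *	}
 */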