/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <drm/drm_cache.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
	GEM_BUG_ON(!obj->mm.page_sizes.phys);

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_gt *gt;
	int id;

	for_each_gt(gt, i915, id) {
		if (!obj->mm.tlb[id])
			continue;

		intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
		obj->mm.tlb[id] = 0;
	}
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	flush_tlb_invalidate(obj);

	return pages;
}
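
/*
 * Illustrative sketch (not a backend defined in this file): a minimal
 * get_pages() implementation is expected to build an sg_table and hand it
 * to __i915_gem_object_set_pages(), while its put_pages() receives the
 * sg_table previously returned by __i915_gem_object_unset_pages(). The
 * backend below is hypothetical and only shows that pairing:
 *
 *	static int my_backend_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		struct sg_table *pages;
 *
 *		pages = my_backend_alloc_st(obj);	// hypothetical helper
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		__i915_gem_object_set_pages(obj, pages);
 *		return 0;
 *	}
 *
 *	static void my_backend_put_pages(struct drm_i915_gem_object *obj,
 *					 struct sg_table *pages)
 *	{
 *		sg_free_table(pages);
 *		kfree(pages);
 *	}
 */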

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return ERR_PTR(-EINVAL);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 *	- If the object can be placed in device local-memory, then the
	 *	  pages should be allocated and mapped as write-combined only.
	 *
	 *	- Everything else is always allocated and mapped as write-back,
	 *	  with the guarantee that everything is also coherent with the
	 *	  GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}
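
/*
 * Illustrative sketch of a typical caller (an assumed usage pattern, not an
 * API defined here): pin and map the object, write through the CPU mapping,
 * flush, then drop the map reference.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, obj->base.size);
 *	__i915_gem_object_flush_map(obj, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 *
 * Note that on discrete parts the requested type may be overridden to
 * write-combined for local-memory placements, as described above.
 */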

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset)
{
	const bool dma = iter == &obj->mm.get_dma_page ||
			 iter == &obj->ttm.get_io_page;
	unsigned int idx, count;
	struct scatterlist *sg;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				      pgoff_t n, unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
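
/*
 * Illustrative sketch (an assumed caller, not part of this file): walking the
 * object's backing store in dma space with the length-aware helper, e.g. when
 * emitting PTEs one contiguous dma segment at a time.
 *
 *	pgoff_t n = 0;
 *
 *	while (n < obj->base.size >> PAGE_SHIFT) {
 *		unsigned int len;
 *		dma_addr_t addr;
 *
 *		addr = i915_gem_object_get_dma_address_len(obj, n, &len);
 *		// emit PTEs for [addr, addr + len) -- hypothetical consumer
 *		n += len >> PAGE_SHIFT;
 *	}
 */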