1 /* 2 * Copyright © 2010 Daniel Vetter 3 * Copyright © 2011-2014 Intel Corporation 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 22 * IN THE SOFTWARE. 23 * 24 */ 25 26 #include <linux/slab.h> /* fault-inject.h is not standalone! */ 27 28 #include <linux/fault-inject.h> 29 #include <linux/log2.h> 30 #include <linux/random.h> 31 #include <linux/seq_file.h> 32 #include <linux/stop_machine.h> 33 34 #include <asm/set_memory.h> 35 36 #include <drm/i915_drm.h> 37 38 #include "i915_drv.h" 39 #include "i915_vgpu.h" 40 #include "i915_reset.h" 41 #include "i915_trace.h" 42 #include "intel_drv.h" 43 #include "intel_frontbuffer.h" 44 45 #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) 46 47 /** 48 * DOC: Global GTT views 49 * 50 * Background and previous state 51 * 52 * Historically objects could exists (be bound) in global GTT space only as 53 * singular instances with a view representing all of the object's backing pages 54 * in a linear fashion. This view will be called a normal view. 55 * 56 * To support multiple views of the same object, where the number of mapped 57 * pages is not equal to the backing store, or where the layout of the pages 58 * is not linear, concept of a GGTT view was added. 59 * 60 * One example of an alternative view is a stereo display driven by a single 61 * image. In this case we would have a framebuffer looking like this 62 * (2x2 pages): 63 * 64 * 12 65 * 34 66 * 67 * Above would represent a normal GGTT view as normally mapped for GPU or CPU 68 * rendering. In contrast, fed to the display engine would be an alternative 69 * view which could look something like this: 70 * 71 * 1212 72 * 3434 73 * 74 * In this example both the size and layout of pages in the alternative view is 75 * different from the normal view. 76 * 77 * Implementation and usage 78 * 79 * GGTT views are implemented using VMAs and are distinguished via enum 80 * i915_ggtt_view_type and struct i915_ggtt_view. 81 * 82 * A new flavour of core GEM functions which work with GGTT bound objects were 83 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid 84 * renaming in large amounts of code. They take the struct i915_ggtt_view 85 * parameter encapsulating all metadata required to implement a view. 86 * 87 * As a helper for callers which are only interested in the normal view, 88 * globally const i915_ggtt_view_normal singleton instance exists. 
All old core 89 * GEM API functions, the ones not taking the view parameter, are operating on, 90 * or with the normal GGTT view. 91 * 92 * Code wanting to add or use a new GGTT view needs to: 93 * 94 * 1. Add a new enum with a suitable name. 95 * 2. Extend the metadata in the i915_ggtt_view structure if required. 96 * 3. Add support to i915_get_vma_pages(). 97 * 98 * New views are required to build a scatter-gather table from within the 99 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and 100 * exists for the lifetime of an VMA. 101 * 102 * Core API is designed to have copy semantics which means that passed in 103 * struct i915_ggtt_view does not need to be persistent (left around after 104 * calling the core API functions). 105 * 106 */ 107 108 static int 109 i915_get_ggtt_vma_pages(struct i915_vma *vma); 110 111 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv) 112 { 113 /* 114 * Note that as an uncached mmio write, this will flush the 115 * WCB of the writes into the GGTT before it triggers the invalidate. 116 */ 117 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 118 } 119 120 static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv) 121 { 122 gen6_ggtt_invalidate(dev_priv); 123 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 124 } 125 126 static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv) 127 { 128 intel_gtt_chipset_flush(); 129 } 130 131 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915) 132 { 133 i915->ggtt.invalidate(i915); 134 } 135 136 static int ppgtt_bind_vma(struct i915_vma *vma, 137 enum i915_cache_level cache_level, 138 u32 unused) 139 { 140 u32 pte_flags; 141 int err; 142 143 if (!(vma->flags & I915_VMA_LOCAL_BIND)) { 144 err = vma->vm->allocate_va_range(vma->vm, 145 vma->node.start, vma->size); 146 if (err) 147 return err; 148 } 149 150 /* Applicable to VLV, and gen8+ */ 151 pte_flags = 0; 152 if (i915_gem_object_is_readonly(vma->obj)) 153 pte_flags |= PTE_READ_ONLY; 154 155 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); 156 157 return 0; 158 } 159 160 static void ppgtt_unbind_vma(struct i915_vma *vma) 161 { 162 vma->vm->clear_range(vma->vm, vma->node.start, vma->size); 163 } 164 165 static int ppgtt_set_pages(struct i915_vma *vma) 166 { 167 GEM_BUG_ON(vma->pages); 168 169 vma->pages = vma->obj->mm.pages; 170 171 vma->page_sizes = vma->obj->mm.page_sizes; 172 173 return 0; 174 } 175 176 static void clear_pages(struct i915_vma *vma) 177 { 178 GEM_BUG_ON(!vma->pages); 179 180 if (vma->pages != vma->obj->mm.pages) { 181 sg_free_table(vma->pages); 182 kfree(vma->pages); 183 } 184 vma->pages = NULL; 185 186 memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); 187 } 188 189 static u64 gen8_pte_encode(dma_addr_t addr, 190 enum i915_cache_level level, 191 u32 flags) 192 { 193 gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; 194 195 if (unlikely(flags & PTE_READ_ONLY)) 196 pte &= ~_PAGE_RW; 197 198 switch (level) { 199 case I915_CACHE_NONE: 200 pte |= PPAT_UNCACHED; 201 break; 202 case I915_CACHE_WT: 203 pte |= PPAT_DISPLAY_ELLC; 204 break; 205 default: 206 pte |= PPAT_CACHED; 207 break; 208 } 209 210 return pte; 211 } 212 213 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, 214 const enum i915_cache_level level) 215 { 216 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; 217 pde |= addr; 218 if (level != I915_CACHE_NONE) 219 pde |= PPAT_CACHED_PDE; 220 else 221 pde |= PPAT_UNCACHED; 222 return pde; 223 } 224 225 #define gen8_pdpe_encode gen8_pde_encode 226 #define 
gen8_pml4e_encode gen8_pde_encode 227 228 static u64 snb_pte_encode(dma_addr_t addr, 229 enum i915_cache_level level, 230 u32 flags) 231 { 232 gen6_pte_t pte = GEN6_PTE_VALID; 233 pte |= GEN6_PTE_ADDR_ENCODE(addr); 234 235 switch (level) { 236 case I915_CACHE_L3_LLC: 237 case I915_CACHE_LLC: 238 pte |= GEN6_PTE_CACHE_LLC; 239 break; 240 case I915_CACHE_NONE: 241 pte |= GEN6_PTE_UNCACHED; 242 break; 243 default: 244 MISSING_CASE(level); 245 } 246 247 return pte; 248 } 249 250 static u64 ivb_pte_encode(dma_addr_t addr, 251 enum i915_cache_level level, 252 u32 flags) 253 { 254 gen6_pte_t pte = GEN6_PTE_VALID; 255 pte |= GEN6_PTE_ADDR_ENCODE(addr); 256 257 switch (level) { 258 case I915_CACHE_L3_LLC: 259 pte |= GEN7_PTE_CACHE_L3_LLC; 260 break; 261 case I915_CACHE_LLC: 262 pte |= GEN6_PTE_CACHE_LLC; 263 break; 264 case I915_CACHE_NONE: 265 pte |= GEN6_PTE_UNCACHED; 266 break; 267 default: 268 MISSING_CASE(level); 269 } 270 271 return pte; 272 } 273 274 static u64 byt_pte_encode(dma_addr_t addr, 275 enum i915_cache_level level, 276 u32 flags) 277 { 278 gen6_pte_t pte = GEN6_PTE_VALID; 279 pte |= GEN6_PTE_ADDR_ENCODE(addr); 280 281 if (!(flags & PTE_READ_ONLY)) 282 pte |= BYT_PTE_WRITEABLE; 283 284 if (level != I915_CACHE_NONE) 285 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; 286 287 return pte; 288 } 289 290 static u64 hsw_pte_encode(dma_addr_t addr, 291 enum i915_cache_level level, 292 u32 flags) 293 { 294 gen6_pte_t pte = GEN6_PTE_VALID; 295 pte |= HSW_PTE_ADDR_ENCODE(addr); 296 297 if (level != I915_CACHE_NONE) 298 pte |= HSW_WB_LLC_AGE3; 299 300 return pte; 301 } 302 303 static u64 iris_pte_encode(dma_addr_t addr, 304 enum i915_cache_level level, 305 u32 flags) 306 { 307 gen6_pte_t pte = GEN6_PTE_VALID; 308 pte |= HSW_PTE_ADDR_ENCODE(addr); 309 310 switch (level) { 311 case I915_CACHE_NONE: 312 break; 313 case I915_CACHE_WT: 314 pte |= HSW_WT_ELLC_LLC_AGE3; 315 break; 316 default: 317 pte |= HSW_WB_ELLC_LLC_AGE3; 318 break; 319 } 320 321 return pte; 322 } 323 324 static void stash_init(struct pagestash *stash) 325 { 326 pagevec_init(&stash->pvec); 327 spin_lock_init(&stash->lock); 328 } 329 330 static struct page *stash_pop_page(struct pagestash *stash) 331 { 332 struct page *page = NULL; 333 334 spin_lock(&stash->lock); 335 if (likely(stash->pvec.nr)) 336 page = stash->pvec.pages[--stash->pvec.nr]; 337 spin_unlock(&stash->lock); 338 339 return page; 340 } 341 342 static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) 343 { 344 int nr; 345 346 spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); 347 348 nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec)); 349 memcpy(stash->pvec.pages + stash->pvec.nr, 350 pvec->pages + pvec->nr - nr, 351 sizeof(pvec->pages[0]) * nr); 352 stash->pvec.nr += nr; 353 354 spin_unlock(&stash->lock); 355 356 pvec->nr -= nr; 357 } 358 359 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) 360 { 361 struct pagevec stack; 362 struct page *page; 363 364 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) 365 i915_gem_shrink_all(vm->i915); 366 367 page = stash_pop_page(&vm->free_pages); 368 if (page) 369 return page; 370 371 if (!vm->pt_kmap_wc) 372 return alloc_page(gfp); 373 374 /* Look in our global stash of WC pages... */ 375 page = stash_pop_page(&vm->i915->mm.wc_stash); 376 if (page) 377 return page; 378 379 /* 380 * Otherwise batch allocate pages to amortize cost of set_pages_wc. 
381 * 382 * We have to be careful as page allocation may trigger the shrinker 383 * (via direct reclaim) which will fill up the WC stash underneath us. 384 * So we add our WB pages into a temporary pvec on the stack and merge 385 * them into the WC stash after all the allocations are complete. 386 */ 387 pagevec_init(&stack); 388 do { 389 struct page *page; 390 391 page = alloc_page(gfp); 392 if (unlikely(!page)) 393 break; 394 395 stack.pages[stack.nr++] = page; 396 } while (pagevec_space(&stack)); 397 398 if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { 399 page = stack.pages[--stack.nr]; 400 401 /* Merge spare WC pages to the global stash */ 402 stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); 403 404 /* Push any surplus WC pages onto the local VM stash */ 405 if (stack.nr) 406 stash_push_pagevec(&vm->free_pages, &stack); 407 } 408 409 /* Return unwanted leftovers */ 410 if (unlikely(stack.nr)) { 411 WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); 412 __pagevec_release(&stack); 413 } 414 415 return page; 416 } 417 418 static void vm_free_pages_release(struct i915_address_space *vm, 419 bool immediate) 420 { 421 struct pagevec *pvec = &vm->free_pages.pvec; 422 struct pagevec stack; 423 424 lockdep_assert_held(&vm->free_pages.lock); 425 GEM_BUG_ON(!pagevec_count(pvec)); 426 427 if (vm->pt_kmap_wc) { 428 /* 429 * When we use WC, first fill up the global stash and then 430 * only if full immediately free the overflow. 431 */ 432 stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); 433 434 /* 435 * As we have made some room in the VM's free_pages, 436 * we can wait for it to fill again. Unless we are 437 * inside i915_address_space_fini() and must 438 * immediately release the pages! 439 */ 440 if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) 441 return; 442 443 /* 444 * We have to drop the lock to allow ourselves to sleep, 445 * so take a copy of the pvec and clear the stash for 446 * others to use it as we sleep. 447 */ 448 stack = *pvec; 449 pagevec_reinit(pvec); 450 spin_unlock(&vm->free_pages.lock); 451 452 pvec = &stack; 453 set_pages_array_wb(pvec->pages, pvec->nr); 454 455 spin_lock(&vm->free_pages.lock); 456 } 457 458 __pagevec_release(pvec); 459 } 460 461 static void vm_free_page(struct i915_address_space *vm, struct page *page) 462 { 463 /* 464 * On !llc, we need to change the pages back to WB. We only do so 465 * in bulk, so we rarely need to change the page attributes here, 466 * but doing so requires a stop_machine() from deep inside arch/x86/mm. 467 * To make detection of the possible sleep more likely, use an 468 * unconditional might_sleep() for everybody. 469 */ 470 might_sleep(); 471 spin_lock(&vm->free_pages.lock); 472 if (!pagevec_add(&vm->free_pages.pvec, page)) 473 vm_free_pages_release(vm, false); 474 spin_unlock(&vm->free_pages.lock); 475 } 476 477 static void i915_address_space_init(struct i915_address_space *vm, int subclass) 478 { 479 /* 480 * The vm->mutex must be reclaim safe (for use in the shrinker). 481 * Do a dummy acquire now under fs_reclaim so that any allocation 482 * attempt holding the lock is immediately reported by lockdep. 
483 */ 484 mutex_init(&vm->mutex); 485 lockdep_set_subclass(&vm->mutex, subclass); 486 i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); 487 488 GEM_BUG_ON(!vm->total); 489 drm_mm_init(&vm->mm, 0, vm->total); 490 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; 491 492 stash_init(&vm->free_pages); 493 494 INIT_LIST_HEAD(&vm->unbound_list); 495 INIT_LIST_HEAD(&vm->bound_list); 496 } 497 498 static void i915_address_space_fini(struct i915_address_space *vm) 499 { 500 spin_lock(&vm->free_pages.lock); 501 if (pagevec_count(&vm->free_pages.pvec)) 502 vm_free_pages_release(vm, true); 503 GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); 504 spin_unlock(&vm->free_pages.lock); 505 506 drm_mm_takedown(&vm->mm); 507 508 mutex_destroy(&vm->mutex); 509 } 510 511 static int __setup_page_dma(struct i915_address_space *vm, 512 struct i915_page_dma *p, 513 gfp_t gfp) 514 { 515 p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL); 516 if (unlikely(!p->page)) 517 return -ENOMEM; 518 519 p->daddr = dma_map_page_attrs(vm->dma, 520 p->page, 0, PAGE_SIZE, 521 PCI_DMA_BIDIRECTIONAL, 522 DMA_ATTR_SKIP_CPU_SYNC | 523 DMA_ATTR_NO_WARN); 524 if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { 525 vm_free_page(vm, p->page); 526 return -ENOMEM; 527 } 528 529 return 0; 530 } 531 532 static int setup_page_dma(struct i915_address_space *vm, 533 struct i915_page_dma *p) 534 { 535 return __setup_page_dma(vm, p, __GFP_HIGHMEM); 536 } 537 538 static void cleanup_page_dma(struct i915_address_space *vm, 539 struct i915_page_dma *p) 540 { 541 dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 542 vm_free_page(vm, p->page); 543 } 544 545 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) 546 547 #define setup_px(vm, px) setup_page_dma((vm), px_base(px)) 548 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px)) 549 #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v)) 550 #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v)) 551 552 static void fill_page_dma(struct i915_address_space *vm, 553 struct i915_page_dma *p, 554 const u64 val) 555 { 556 u64 * const vaddr = kmap_atomic(p->page); 557 558 memset64(vaddr, val, PAGE_SIZE / sizeof(val)); 559 560 kunmap_atomic(vaddr); 561 } 562 563 static void fill_page_dma_32(struct i915_address_space *vm, 564 struct i915_page_dma *p, 565 const u32 v) 566 { 567 fill_page_dma(vm, p, (u64)v << 32 | v); 568 } 569 570 static int 571 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) 572 { 573 unsigned long size; 574 575 /* 576 * In order to utilize 64K pages for an object with a size < 2M, we will 577 * need to support a 64K scratch page, given that every 16th entry for a 578 * page-table operating in 64K mode must point to a properly aligned 64K 579 * region, including any PTEs which happen to point to scratch. 580 * 581 * This is only relevant for the 48b PPGTT where we support 582 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the 583 * scratch (read-only) between all vm, we create one 64k scratch page 584 * for all. 
585 */ 586 size = I915_GTT_PAGE_SIZE_4K; 587 if (i915_vm_is_48bit(vm) && 588 HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { 589 size = I915_GTT_PAGE_SIZE_64K; 590 gfp |= __GFP_NOWARN; 591 } 592 gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; 593 594 do { 595 int order = get_order(size); 596 struct page *page; 597 dma_addr_t addr; 598 599 page = alloc_pages(gfp, order); 600 if (unlikely(!page)) 601 goto skip; 602 603 addr = dma_map_page_attrs(vm->dma, 604 page, 0, size, 605 PCI_DMA_BIDIRECTIONAL, 606 DMA_ATTR_SKIP_CPU_SYNC | 607 DMA_ATTR_NO_WARN); 608 if (unlikely(dma_mapping_error(vm->dma, addr))) 609 goto free_page; 610 611 if (unlikely(!IS_ALIGNED(addr, size))) 612 goto unmap_page; 613 614 vm->scratch_page.page = page; 615 vm->scratch_page.daddr = addr; 616 vm->scratch_page.order = order; 617 return 0; 618 619 unmap_page: 620 dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL); 621 free_page: 622 __free_pages(page, order); 623 skip: 624 if (size == I915_GTT_PAGE_SIZE_4K) 625 return -ENOMEM; 626 627 size = I915_GTT_PAGE_SIZE_4K; 628 gfp &= ~__GFP_NOWARN; 629 } while (1); 630 } 631 632 static void cleanup_scratch_page(struct i915_address_space *vm) 633 { 634 struct i915_page_dma *p = &vm->scratch_page; 635 636 dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT, 637 PCI_DMA_BIDIRECTIONAL); 638 __free_pages(p->page, p->order); 639 } 640 641 static struct i915_page_table *alloc_pt(struct i915_address_space *vm) 642 { 643 struct i915_page_table *pt; 644 645 pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL); 646 if (unlikely(!pt)) 647 return ERR_PTR(-ENOMEM); 648 649 if (unlikely(setup_px(vm, pt))) { 650 kfree(pt); 651 return ERR_PTR(-ENOMEM); 652 } 653 654 pt->used_ptes = 0; 655 return pt; 656 } 657 658 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) 659 { 660 cleanup_px(vm, pt); 661 kfree(pt); 662 } 663 664 static void gen8_initialize_pt(struct i915_address_space *vm, 665 struct i915_page_table *pt) 666 { 667 fill_px(vm, pt, vm->scratch_pte); 668 } 669 670 static void gen6_initialize_pt(struct i915_address_space *vm, 671 struct i915_page_table *pt) 672 { 673 fill32_px(vm, pt, vm->scratch_pte); 674 } 675 676 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) 677 { 678 struct i915_page_directory *pd; 679 680 pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL); 681 if (unlikely(!pd)) 682 return ERR_PTR(-ENOMEM); 683 684 if (unlikely(setup_px(vm, pd))) { 685 kfree(pd); 686 return ERR_PTR(-ENOMEM); 687 } 688 689 pd->used_pdes = 0; 690 return pd; 691 } 692 693 static void free_pd(struct i915_address_space *vm, 694 struct i915_page_directory *pd) 695 { 696 cleanup_px(vm, pd); 697 kfree(pd); 698 } 699 700 static void gen8_initialize_pd(struct i915_address_space *vm, 701 struct i915_page_directory *pd) 702 { 703 fill_px(vm, pd, 704 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC)); 705 memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES); 706 } 707 708 static int __pdp_init(struct i915_address_space *vm, 709 struct i915_page_directory_pointer *pdp) 710 { 711 const unsigned int pdpes = i915_pdpes_per_pdp(vm); 712 713 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory), 714 I915_GFP_ALLOW_FAIL); 715 if (unlikely(!pdp->page_directory)) 716 return -ENOMEM; 717 718 memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes); 719 720 return 0; 721 } 722 723 static void __pdp_fini(struct i915_page_directory_pointer *pdp) 724 { 725 kfree(pdp->page_directory); 726 pdp->page_directory = NULL; 727 } 728 729 static 
inline bool use_4lvl(const struct i915_address_space *vm) 730 { 731 return i915_vm_is_48bit(vm); 732 } 733 734 static struct i915_page_directory_pointer * 735 alloc_pdp(struct i915_address_space *vm) 736 { 737 struct i915_page_directory_pointer *pdp; 738 int ret = -ENOMEM; 739 740 GEM_BUG_ON(!use_4lvl(vm)); 741 742 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); 743 if (!pdp) 744 return ERR_PTR(-ENOMEM); 745 746 ret = __pdp_init(vm, pdp); 747 if (ret) 748 goto fail_bitmap; 749 750 ret = setup_px(vm, pdp); 751 if (ret) 752 goto fail_page_m; 753 754 return pdp; 755 756 fail_page_m: 757 __pdp_fini(pdp); 758 fail_bitmap: 759 kfree(pdp); 760 761 return ERR_PTR(ret); 762 } 763 764 static void free_pdp(struct i915_address_space *vm, 765 struct i915_page_directory_pointer *pdp) 766 { 767 __pdp_fini(pdp); 768 769 if (!use_4lvl(vm)) 770 return; 771 772 cleanup_px(vm, pdp); 773 kfree(pdp); 774 } 775 776 static void gen8_initialize_pdp(struct i915_address_space *vm, 777 struct i915_page_directory_pointer *pdp) 778 { 779 gen8_ppgtt_pdpe_t scratch_pdpe; 780 781 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); 782 783 fill_px(vm, pdp, scratch_pdpe); 784 } 785 786 static void gen8_initialize_pml4(struct i915_address_space *vm, 787 struct i915_pml4 *pml4) 788 { 789 fill_px(vm, pml4, 790 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC)); 791 memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4); 792 } 793 794 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify 795 * the page table structures, we mark them dirty so that 796 * context switching/execlist queuing code takes extra steps 797 * to ensure that tlbs are flushed. 798 */ 799 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) 800 { 801 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask; 802 } 803 804 /* Removes entries from a single page table, releasing it if it's empty. 805 * Caller can use the return value to update higher-level entries. 
806 */ 807 static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, 808 struct i915_page_table *pt, 809 u64 start, u64 length) 810 { 811 unsigned int num_entries = gen8_pte_count(start, length); 812 unsigned int pte = gen8_pte_index(start); 813 unsigned int pte_end = pte + num_entries; 814 gen8_pte_t *vaddr; 815 816 GEM_BUG_ON(num_entries > pt->used_ptes); 817 818 pt->used_ptes -= num_entries; 819 if (!pt->used_ptes) 820 return true; 821 822 vaddr = kmap_atomic_px(pt); 823 while (pte < pte_end) 824 vaddr[pte++] = vm->scratch_pte; 825 kunmap_atomic(vaddr); 826 827 return false; 828 } 829 830 static void gen8_ppgtt_set_pde(struct i915_address_space *vm, 831 struct i915_page_directory *pd, 832 struct i915_page_table *pt, 833 unsigned int pde) 834 { 835 gen8_pde_t *vaddr; 836 837 pd->page_table[pde] = pt; 838 839 vaddr = kmap_atomic_px(pd); 840 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC); 841 kunmap_atomic(vaddr); 842 } 843 844 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, 845 struct i915_page_directory *pd, 846 u64 start, u64 length) 847 { 848 struct i915_page_table *pt; 849 u32 pde; 850 851 gen8_for_each_pde(pt, pd, start, length, pde) { 852 GEM_BUG_ON(pt == vm->scratch_pt); 853 854 if (!gen8_ppgtt_clear_pt(vm, pt, start, length)) 855 continue; 856 857 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde); 858 GEM_BUG_ON(!pd->used_pdes); 859 pd->used_pdes--; 860 861 free_pt(vm, pt); 862 } 863 864 return !pd->used_pdes; 865 } 866 867 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm, 868 struct i915_page_directory_pointer *pdp, 869 struct i915_page_directory *pd, 870 unsigned int pdpe) 871 { 872 gen8_ppgtt_pdpe_t *vaddr; 873 874 pdp->page_directory[pdpe] = pd; 875 if (!use_4lvl(vm)) 876 return; 877 878 vaddr = kmap_atomic_px(pdp); 879 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC); 880 kunmap_atomic(vaddr); 881 } 882 883 /* Removes entries from a single page dir pointer, releasing it if it's empty. 884 * Caller can use the return value to update higher-level entries 885 */ 886 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, 887 struct i915_page_directory_pointer *pdp, 888 u64 start, u64 length) 889 { 890 struct i915_page_directory *pd; 891 unsigned int pdpe; 892 893 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 894 GEM_BUG_ON(pd == vm->scratch_pd); 895 896 if (!gen8_ppgtt_clear_pd(vm, pd, start, length)) 897 continue; 898 899 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); 900 GEM_BUG_ON(!pdp->used_pdpes); 901 pdp->used_pdpes--; 902 903 free_pd(vm, pd); 904 } 905 906 return !pdp->used_pdpes; 907 } 908 909 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm, 910 u64 start, u64 length) 911 { 912 gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length); 913 } 914 915 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4, 916 struct i915_page_directory_pointer *pdp, 917 unsigned int pml4e) 918 { 919 gen8_ppgtt_pml4e_t *vaddr; 920 921 pml4->pdps[pml4e] = pdp; 922 923 vaddr = kmap_atomic_px(pml4); 924 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); 925 kunmap_atomic(vaddr); 926 } 927 928 /* Removes entries from a single pml4. 929 * This is the top-level structure in 4-level page tables used on gen8+. 930 * Empty entries are always scratch pml4e. 
931 */ 932 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, 933 u64 start, u64 length) 934 { 935 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 936 struct i915_pml4 *pml4 = &ppgtt->pml4; 937 struct i915_page_directory_pointer *pdp; 938 unsigned int pml4e; 939 940 GEM_BUG_ON(!use_4lvl(vm)); 941 942 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { 943 GEM_BUG_ON(pdp == vm->scratch_pdp); 944 945 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length)) 946 continue; 947 948 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); 949 950 free_pdp(vm, pdp); 951 } 952 } 953 954 static inline struct sgt_dma { 955 struct scatterlist *sg; 956 dma_addr_t dma, max; 957 } sgt_dma(struct i915_vma *vma) { 958 struct scatterlist *sg = vma->pages->sgl; 959 dma_addr_t addr = sg_dma_address(sg); 960 return (struct sgt_dma) { sg, addr, addr + sg->length }; 961 } 962 963 struct gen8_insert_pte { 964 u16 pml4e; 965 u16 pdpe; 966 u16 pde; 967 u16 pte; 968 }; 969 970 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start) 971 { 972 return (struct gen8_insert_pte) { 973 gen8_pml4e_index(start), 974 gen8_pdpe_index(start), 975 gen8_pde_index(start), 976 gen8_pte_index(start), 977 }; 978 } 979 980 static __always_inline bool 981 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, 982 struct i915_page_directory_pointer *pdp, 983 struct sgt_dma *iter, 984 struct gen8_insert_pte *idx, 985 enum i915_cache_level cache_level, 986 u32 flags) 987 { 988 struct i915_page_directory *pd; 989 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); 990 gen8_pte_t *vaddr; 991 bool ret; 992 993 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); 994 pd = pdp->page_directory[idx->pdpe]; 995 vaddr = kmap_atomic_px(pd->page_table[idx->pde]); 996 do { 997 vaddr[idx->pte] = pte_encode | iter->dma; 998 999 iter->dma += I915_GTT_PAGE_SIZE; 1000 if (iter->dma >= iter->max) { 1001 iter->sg = __sg_next(iter->sg); 1002 if (!iter->sg) { 1003 ret = false; 1004 break; 1005 } 1006 1007 iter->dma = sg_dma_address(iter->sg); 1008 iter->max = iter->dma + iter->sg->length; 1009 } 1010 1011 if (++idx->pte == GEN8_PTES) { 1012 idx->pte = 0; 1013 1014 if (++idx->pde == I915_PDES) { 1015 idx->pde = 0; 1016 1017 /* Limited by sg length for 3lvl */ 1018 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) { 1019 idx->pdpe = 0; 1020 ret = true; 1021 break; 1022 } 1023 1024 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); 1025 pd = pdp->page_directory[idx->pdpe]; 1026 } 1027 1028 kunmap_atomic(vaddr); 1029 vaddr = kmap_atomic_px(pd->page_table[idx->pde]); 1030 } 1031 } while (1); 1032 kunmap_atomic(vaddr); 1033 1034 return ret; 1035 } 1036 1037 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, 1038 struct i915_vma *vma, 1039 enum i915_cache_level cache_level, 1040 u32 flags) 1041 { 1042 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1043 struct sgt_dma iter = sgt_dma(vma); 1044 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); 1045 1046 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, 1047 cache_level, flags); 1048 1049 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 1050 } 1051 1052 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, 1053 struct i915_page_directory_pointer **pdps, 1054 struct sgt_dma *iter, 1055 enum i915_cache_level cache_level, 1056 u32 flags) 1057 { 1058 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); 1059 u64 start = vma->node.start; 1060 dma_addr_t rem = iter->sg->length; 1061 1062 do { 1063 struct 
gen8_insert_pte idx = gen8_insert_pte(start); 1064 struct i915_page_directory_pointer *pdp = pdps[idx.pml4e]; 1065 struct i915_page_directory *pd = pdp->page_directory[idx.pdpe]; 1066 unsigned int page_size; 1067 bool maybe_64K = false; 1068 gen8_pte_t encode = pte_encode; 1069 gen8_pte_t *vaddr; 1070 u16 index, max; 1071 1072 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M && 1073 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) && 1074 rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) { 1075 index = idx.pde; 1076 max = I915_PDES; 1077 page_size = I915_GTT_PAGE_SIZE_2M; 1078 1079 encode |= GEN8_PDE_PS_2M; 1080 1081 vaddr = kmap_atomic_px(pd); 1082 } else { 1083 struct i915_page_table *pt = pd->page_table[idx.pde]; 1084 1085 index = idx.pte; 1086 max = GEN8_PTES; 1087 page_size = I915_GTT_PAGE_SIZE; 1088 1089 if (!index && 1090 vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K && 1091 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && 1092 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || 1093 rem >= (max - index) * I915_GTT_PAGE_SIZE)) 1094 maybe_64K = true; 1095 1096 vaddr = kmap_atomic_px(pt); 1097 } 1098 1099 do { 1100 GEM_BUG_ON(iter->sg->length < page_size); 1101 vaddr[index++] = encode | iter->dma; 1102 1103 start += page_size; 1104 iter->dma += page_size; 1105 rem -= page_size; 1106 if (iter->dma >= iter->max) { 1107 iter->sg = __sg_next(iter->sg); 1108 if (!iter->sg) 1109 break; 1110 1111 rem = iter->sg->length; 1112 iter->dma = sg_dma_address(iter->sg); 1113 iter->max = iter->dma + rem; 1114 1115 if (maybe_64K && index < max && 1116 !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && 1117 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || 1118 rem >= (max - index) * I915_GTT_PAGE_SIZE))) 1119 maybe_64K = false; 1120 1121 if (unlikely(!IS_ALIGNED(iter->dma, page_size))) 1122 break; 1123 } 1124 } while (rem >= page_size && index < max); 1125 1126 kunmap_atomic(vaddr); 1127 1128 /* 1129 * Is it safe to mark the 2M block as 64K? -- Either we have 1130 * filled whole page-table with 64K entries, or filled part of 1131 * it and have reached the end of the sg table and we have 1132 * enough padding. 1133 */ 1134 if (maybe_64K && 1135 (index == max || 1136 (i915_vm_has_scratch_64K(vma->vm) && 1137 !iter->sg && IS_ALIGNED(vma->node.start + 1138 vma->node.size, 1139 I915_GTT_PAGE_SIZE_2M)))) { 1140 vaddr = kmap_atomic_px(pd); 1141 vaddr[idx.pde] |= GEN8_PDE_IPS_64K; 1142 kunmap_atomic(vaddr); 1143 page_size = I915_GTT_PAGE_SIZE_64K; 1144 1145 /* 1146 * We write all 4K page entries, even when using 64K 1147 * pages. In order to verify that the HW isn't cheating 1148 * by using the 4K PTE instead of the 64K PTE, we want 1149 * to remove all the surplus entries. If the HW skipped 1150 * the 64K PTE, it will read/write into the scratch page 1151 * instead - which we detect as missing results during 1152 * selftests. 
1153 */ 1154 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { 1155 u16 i; 1156 1157 encode = vma->vm->scratch_pte; 1158 vaddr = kmap_atomic_px(pd->page_table[idx.pde]); 1159 1160 for (i = 1; i < index; i += 16) 1161 memset64(vaddr + i, encode, 15); 1162 1163 kunmap_atomic(vaddr); 1164 } 1165 } 1166 1167 vma->page_sizes.gtt |= page_size; 1168 } while (iter->sg); 1169 } 1170 1171 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, 1172 struct i915_vma *vma, 1173 enum i915_cache_level cache_level, 1174 u32 flags) 1175 { 1176 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1177 struct sgt_dma iter = sgt_dma(vma); 1178 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; 1179 1180 if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { 1181 gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level, 1182 flags); 1183 } else { 1184 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); 1185 1186 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], 1187 &iter, &idx, cache_level, 1188 flags)) 1189 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); 1190 1191 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 1192 } 1193 } 1194 1195 static void gen8_free_page_tables(struct i915_address_space *vm, 1196 struct i915_page_directory *pd) 1197 { 1198 int i; 1199 1200 for (i = 0; i < I915_PDES; i++) { 1201 if (pd->page_table[i] != vm->scratch_pt) 1202 free_pt(vm, pd->page_table[i]); 1203 } 1204 } 1205 1206 static int gen8_init_scratch(struct i915_address_space *vm) 1207 { 1208 int ret; 1209 1210 /* 1211 * If everybody agrees to not to write into the scratch page, 1212 * we can reuse it for all vm, keeping contexts and processes separate. 1213 */ 1214 if (vm->has_read_only && 1215 vm->i915->kernel_context && 1216 vm->i915->kernel_context->ppgtt) { 1217 struct i915_address_space *clone = 1218 &vm->i915->kernel_context->ppgtt->vm; 1219 1220 GEM_BUG_ON(!clone->has_read_only); 1221 1222 vm->scratch_page.order = clone->scratch_page.order; 1223 vm->scratch_pte = clone->scratch_pte; 1224 vm->scratch_pt = clone->scratch_pt; 1225 vm->scratch_pd = clone->scratch_pd; 1226 vm->scratch_pdp = clone->scratch_pdp; 1227 return 0; 1228 } 1229 1230 ret = setup_scratch_page(vm, __GFP_HIGHMEM); 1231 if (ret) 1232 return ret; 1233 1234 vm->scratch_pte = 1235 gen8_pte_encode(vm->scratch_page.daddr, 1236 I915_CACHE_LLC, 1237 PTE_READ_ONLY); 1238 1239 vm->scratch_pt = alloc_pt(vm); 1240 if (IS_ERR(vm->scratch_pt)) { 1241 ret = PTR_ERR(vm->scratch_pt); 1242 goto free_scratch_page; 1243 } 1244 1245 vm->scratch_pd = alloc_pd(vm); 1246 if (IS_ERR(vm->scratch_pd)) { 1247 ret = PTR_ERR(vm->scratch_pd); 1248 goto free_pt; 1249 } 1250 1251 if (use_4lvl(vm)) { 1252 vm->scratch_pdp = alloc_pdp(vm); 1253 if (IS_ERR(vm->scratch_pdp)) { 1254 ret = PTR_ERR(vm->scratch_pdp); 1255 goto free_pd; 1256 } 1257 } 1258 1259 gen8_initialize_pt(vm, vm->scratch_pt); 1260 gen8_initialize_pd(vm, vm->scratch_pd); 1261 if (use_4lvl(vm)) 1262 gen8_initialize_pdp(vm, vm->scratch_pdp); 1263 1264 return 0; 1265 1266 free_pd: 1267 free_pd(vm, vm->scratch_pd); 1268 free_pt: 1269 free_pt(vm, vm->scratch_pt); 1270 free_scratch_page: 1271 cleanup_scratch_page(vm); 1272 1273 return ret; 1274 } 1275 1276 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) 1277 { 1278 struct i915_address_space *vm = &ppgtt->vm; 1279 struct drm_i915_private *dev_priv = vm->i915; 1280 enum vgt_g2v_type msg; 1281 int i; 1282 1283 if (use_4lvl(vm)) { 1284 const u64 daddr = px_dma(&ppgtt->pml4); 1285 1286 I915_WRITE(vgtif_reg(pdp[0].lo), 
lower_32_bits(daddr)); 1287 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); 1288 1289 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : 1290 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); 1291 } else { 1292 for (i = 0; i < GEN8_3LVL_PDPES; i++) { 1293 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); 1294 1295 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); 1296 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); 1297 } 1298 1299 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : 1300 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); 1301 } 1302 1303 I915_WRITE(vgtif_reg(g2v_notify), msg); 1304 1305 return 0; 1306 } 1307 1308 static void gen8_free_scratch(struct i915_address_space *vm) 1309 { 1310 if (!vm->scratch_page.daddr) 1311 return; 1312 1313 if (use_4lvl(vm)) 1314 free_pdp(vm, vm->scratch_pdp); 1315 free_pd(vm, vm->scratch_pd); 1316 free_pt(vm, vm->scratch_pt); 1317 cleanup_scratch_page(vm); 1318 } 1319 1320 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, 1321 struct i915_page_directory_pointer *pdp) 1322 { 1323 const unsigned int pdpes = i915_pdpes_per_pdp(vm); 1324 int i; 1325 1326 for (i = 0; i < pdpes; i++) { 1327 if (pdp->page_directory[i] == vm->scratch_pd) 1328 continue; 1329 1330 gen8_free_page_tables(vm, pdp->page_directory[i]); 1331 free_pd(vm, pdp->page_directory[i]); 1332 } 1333 1334 free_pdp(vm, pdp); 1335 } 1336 1337 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) 1338 { 1339 int i; 1340 1341 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { 1342 if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp) 1343 continue; 1344 1345 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]); 1346 } 1347 1348 cleanup_px(&ppgtt->vm, &ppgtt->pml4); 1349 } 1350 1351 static void gen8_ppgtt_cleanup(struct i915_address_space *vm) 1352 { 1353 struct drm_i915_private *dev_priv = vm->i915; 1354 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1355 1356 if (intel_vgpu_active(dev_priv)) 1357 gen8_ppgtt_notify_vgt(ppgtt, false); 1358 1359 if (use_4lvl(vm)) 1360 gen8_ppgtt_cleanup_4lvl(ppgtt); 1361 else 1362 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp); 1363 1364 gen8_free_scratch(vm); 1365 } 1366 1367 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, 1368 struct i915_page_directory *pd, 1369 u64 start, u64 length) 1370 { 1371 struct i915_page_table *pt; 1372 u64 from = start; 1373 unsigned int pde; 1374 1375 gen8_for_each_pde(pt, pd, start, length, pde) { 1376 int count = gen8_pte_count(start, length); 1377 1378 if (pt == vm->scratch_pt) { 1379 pd->used_pdes++; 1380 1381 pt = alloc_pt(vm); 1382 if (IS_ERR(pt)) { 1383 pd->used_pdes--; 1384 goto unwind; 1385 } 1386 1387 if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) 1388 gen8_initialize_pt(vm, pt); 1389 1390 gen8_ppgtt_set_pde(vm, pd, pt, pde); 1391 GEM_BUG_ON(pd->used_pdes > I915_PDES); 1392 } 1393 1394 pt->used_ptes += count; 1395 } 1396 return 0; 1397 1398 unwind: 1399 gen8_ppgtt_clear_pd(vm, pd, from, start - from); 1400 return -ENOMEM; 1401 } 1402 1403 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, 1404 struct i915_page_directory_pointer *pdp, 1405 u64 start, u64 length) 1406 { 1407 struct i915_page_directory *pd; 1408 u64 from = start; 1409 unsigned int pdpe; 1410 int ret; 1411 1412 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1413 if (pd == vm->scratch_pd) { 1414 pdp->used_pdpes++; 1415 1416 pd = alloc_pd(vm); 1417 if (IS_ERR(pd)) { 1418 pdp->used_pdpes--; 1419 goto unwind; 1420 } 1421 1422 gen8_initialize_pd(vm, pd); 1423 gen8_ppgtt_set_pdpe(vm, pdp, pd, 
pdpe); 1424 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm)); 1425 } 1426 1427 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); 1428 if (unlikely(ret)) 1429 goto unwind_pd; 1430 } 1431 1432 return 0; 1433 1434 unwind_pd: 1435 if (!pd->used_pdes) { 1436 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); 1437 GEM_BUG_ON(!pdp->used_pdpes); 1438 pdp->used_pdpes--; 1439 free_pd(vm, pd); 1440 } 1441 unwind: 1442 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); 1443 return -ENOMEM; 1444 } 1445 1446 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm, 1447 u64 start, u64 length) 1448 { 1449 return gen8_ppgtt_alloc_pdp(vm, 1450 &i915_vm_to_ppgtt(vm)->pdp, start, length); 1451 } 1452 1453 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, 1454 u64 start, u64 length) 1455 { 1456 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1457 struct i915_pml4 *pml4 = &ppgtt->pml4; 1458 struct i915_page_directory_pointer *pdp; 1459 u64 from = start; 1460 u32 pml4e; 1461 int ret; 1462 1463 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { 1464 if (pml4->pdps[pml4e] == vm->scratch_pdp) { 1465 pdp = alloc_pdp(vm); 1466 if (IS_ERR(pdp)) 1467 goto unwind; 1468 1469 gen8_initialize_pdp(vm, pdp); 1470 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e); 1471 } 1472 1473 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); 1474 if (unlikely(ret)) 1475 goto unwind_pdp; 1476 } 1477 1478 return 0; 1479 1480 unwind_pdp: 1481 if (!pdp->used_pdpes) { 1482 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); 1483 free_pdp(vm, pdp); 1484 } 1485 unwind: 1486 gen8_ppgtt_clear_4lvl(vm, from, start - from); 1487 return -ENOMEM; 1488 } 1489 1490 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) 1491 { 1492 struct i915_address_space *vm = &ppgtt->vm; 1493 struct i915_page_directory_pointer *pdp = &ppgtt->pdp; 1494 struct i915_page_directory *pd; 1495 u64 start = 0, length = ppgtt->vm.total; 1496 u64 from = start; 1497 unsigned int pdpe; 1498 1499 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1500 pd = alloc_pd(vm); 1501 if (IS_ERR(pd)) 1502 goto unwind; 1503 1504 gen8_initialize_pd(vm, pd); 1505 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); 1506 pdp->used_pdpes++; 1507 } 1508 1509 pdp->used_pdpes++; /* never remove */ 1510 return 0; 1511 1512 unwind: 1513 start -= from; 1514 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) { 1515 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); 1516 free_pd(vm, pd); 1517 } 1518 pdp->used_pdpes = 0; 1519 return -ENOMEM; 1520 } 1521 1522 /* 1523 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers 1524 * with a net effect resembling a 2-level page table in normal x86 terms. Each 1525 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address 1526 * space. 1527 * 1528 */ 1529 static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) 1530 { 1531 struct i915_hw_ppgtt *ppgtt; 1532 int err; 1533 1534 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 1535 if (!ppgtt) 1536 return ERR_PTR(-ENOMEM); 1537 1538 kref_init(&ppgtt->ref); 1539 1540 ppgtt->vm.i915 = i915; 1541 ppgtt->vm.dma = &i915->drm.pdev->dev; 1542 1543 ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ? 1544 1ULL << 48 : 1545 1ULL << 32; 1546 1547 /* From bdw, there is support for read-only pages in the PPGTT. */ 1548 ppgtt->vm.has_read_only = true; 1549 1550 i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); 1551 1552 /* There are only few exceptions for gen >=6. chv and bxt. 1553 * And we are not sure about the latter so play safe for now. 
1554 */ 1555 if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915)) 1556 ppgtt->vm.pt_kmap_wc = true; 1557 1558 err = gen8_init_scratch(&ppgtt->vm); 1559 if (err) 1560 goto err_free; 1561 1562 if (use_4lvl(&ppgtt->vm)) { 1563 err = setup_px(&ppgtt->vm, &ppgtt->pml4); 1564 if (err) 1565 goto err_scratch; 1566 1567 gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4); 1568 1569 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl; 1570 ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl; 1571 ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl; 1572 } else { 1573 err = __pdp_init(&ppgtt->vm, &ppgtt->pdp); 1574 if (err) 1575 goto err_scratch; 1576 1577 if (intel_vgpu_active(i915)) { 1578 err = gen8_preallocate_top_level_pdp(ppgtt); 1579 if (err) { 1580 __pdp_fini(&ppgtt->pdp); 1581 goto err_scratch; 1582 } 1583 } 1584 1585 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl; 1586 ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl; 1587 ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl; 1588 } 1589 1590 if (intel_vgpu_active(i915)) 1591 gen8_ppgtt_notify_vgt(ppgtt, true); 1592 1593 ppgtt->vm.cleanup = gen8_ppgtt_cleanup; 1594 1595 ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; 1596 ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; 1597 ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; 1598 ppgtt->vm.vma_ops.clear_pages = clear_pages; 1599 1600 return ppgtt; 1601 1602 err_scratch: 1603 gen8_free_scratch(&ppgtt->vm); 1604 err_free: 1605 kfree(ppgtt); 1606 return ERR_PTR(err); 1607 } 1608 1609 /* Write pde (index) from the page directory @pd to the page table @pt */ 1610 static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, 1611 const unsigned int pde, 1612 const struct i915_page_table *pt) 1613 { 1614 /* Caller needs to make sure the write completes if necessary */ 1615 iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, 1616 ppgtt->pd_addr + pde); 1617 } 1618 1619 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) 1620 { 1621 struct intel_engine_cs *engine; 1622 u32 ecochk, ecobits; 1623 enum intel_engine_id id; 1624 1625 ecobits = I915_READ(GAC_ECO_BITS); 1626 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); 1627 1628 ecochk = I915_READ(GAM_ECOCHK); 1629 if (IS_HASWELL(dev_priv)) { 1630 ecochk |= ECOCHK_PPGTT_WB_HSW; 1631 } else { 1632 ecochk |= ECOCHK_PPGTT_LLC_IVB; 1633 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; 1634 } 1635 I915_WRITE(GAM_ECOCHK, ecochk); 1636 1637 for_each_engine(engine, dev_priv, id) { 1638 /* GFX_MODE is per-ring on gen7+ */ 1639 I915_WRITE(RING_MODE_GEN7(engine), 1640 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1641 } 1642 } 1643 1644 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) 1645 { 1646 u32 ecochk, gab_ctl, ecobits; 1647 1648 ecobits = I915_READ(GAC_ECO_BITS); 1649 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | 1650 ECOBITS_PPGTT_CACHE64B); 1651 1652 gab_ctl = I915_READ(GAB_CTL); 1653 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); 1654 1655 ecochk = I915_READ(GAM_ECOCHK); 1656 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); 1657 1658 if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */ 1659 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1660 } 1661 1662 /* PPGTT support for Sandybdrige/Gen6 and later */ 1663 static void gen6_ppgtt_clear_range(struct i915_address_space *vm, 1664 u64 start, u64 length) 1665 { 1666 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); 1667 unsigned int first_entry = start / I915_GTT_PAGE_SIZE; 1668 unsigned int pde = first_entry / GEN6_PTES; 
1669 unsigned int pte = first_entry % GEN6_PTES; 1670 unsigned int num_entries = length / I915_GTT_PAGE_SIZE; 1671 const gen6_pte_t scratch_pte = vm->scratch_pte; 1672 1673 while (num_entries) { 1674 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; 1675 const unsigned int end = min(pte + num_entries, GEN6_PTES); 1676 const unsigned int count = end - pte; 1677 gen6_pte_t *vaddr; 1678 1679 GEM_BUG_ON(pt == vm->scratch_pt); 1680 1681 num_entries -= count; 1682 1683 GEM_BUG_ON(count > pt->used_ptes); 1684 pt->used_ptes -= count; 1685 if (!pt->used_ptes) 1686 ppgtt->scan_for_unused_pt = true; 1687 1688 /* 1689 * Note that the hw doesn't support removing PDE on the fly 1690 * (they are cached inside the context with no means to 1691 * invalidate the cache), so we can only reset the PTE 1692 * entries back to scratch. 1693 */ 1694 1695 vaddr = kmap_atomic_px(pt); 1696 do { 1697 vaddr[pte++] = scratch_pte; 1698 } while (pte < end); 1699 kunmap_atomic(vaddr); 1700 1701 pte = 0; 1702 } 1703 } 1704 1705 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, 1706 struct i915_vma *vma, 1707 enum i915_cache_level cache_level, 1708 u32 flags) 1709 { 1710 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1711 unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE; 1712 unsigned act_pt = first_entry / GEN6_PTES; 1713 unsigned act_pte = first_entry % GEN6_PTES; 1714 const u32 pte_encode = vm->pte_encode(0, cache_level, flags); 1715 struct sgt_dma iter = sgt_dma(vma); 1716 gen6_pte_t *vaddr; 1717 1718 GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt); 1719 1720 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]); 1721 do { 1722 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); 1723 1724 iter.dma += I915_GTT_PAGE_SIZE; 1725 if (iter.dma == iter.max) { 1726 iter.sg = __sg_next(iter.sg); 1727 if (!iter.sg) 1728 break; 1729 1730 iter.dma = sg_dma_address(iter.sg); 1731 iter.max = iter.dma + iter.sg->length; 1732 } 1733 1734 if (++act_pte == GEN6_PTES) { 1735 kunmap_atomic(vaddr); 1736 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]); 1737 act_pte = 0; 1738 } 1739 } while (1); 1740 kunmap_atomic(vaddr); 1741 1742 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 1743 } 1744 1745 static int gen6_alloc_va_range(struct i915_address_space *vm, 1746 u64 start, u64 length) 1747 { 1748 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); 1749 struct i915_page_table *pt; 1750 u64 from = start; 1751 unsigned int pde; 1752 bool flush = false; 1753 1754 gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { 1755 const unsigned int count = gen6_pte_count(start, length); 1756 1757 if (pt == vm->scratch_pt) { 1758 pt = alloc_pt(vm); 1759 if (IS_ERR(pt)) 1760 goto unwind_out; 1761 1762 gen6_initialize_pt(vm, pt); 1763 ppgtt->base.pd.page_table[pde] = pt; 1764 1765 if (i915_vma_is_bound(ppgtt->vma, 1766 I915_VMA_GLOBAL_BIND)) { 1767 gen6_write_pde(ppgtt, pde, pt); 1768 flush = true; 1769 } 1770 1771 GEM_BUG_ON(pt->used_ptes); 1772 } 1773 1774 pt->used_ptes += count; 1775 } 1776 1777 if (flush) { 1778 mark_tlbs_dirty(&ppgtt->base); 1779 gen6_ggtt_invalidate(ppgtt->base.vm.i915); 1780 } 1781 1782 return 0; 1783 1784 unwind_out: 1785 gen6_ppgtt_clear_range(vm, from, start - from); 1786 return -ENOMEM; 1787 } 1788 1789 static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) 1790 { 1791 struct i915_address_space * const vm = &ppgtt->base.vm; 1792 struct i915_page_table *unused; 1793 u32 pde; 1794 int ret; 1795 1796 ret = setup_scratch_page(vm, 
__GFP_HIGHMEM); 1797 if (ret) 1798 return ret; 1799 1800 vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr, 1801 I915_CACHE_NONE, 1802 PTE_READ_ONLY); 1803 1804 vm->scratch_pt = alloc_pt(vm); 1805 if (IS_ERR(vm->scratch_pt)) { 1806 cleanup_scratch_page(vm); 1807 return PTR_ERR(vm->scratch_pt); 1808 } 1809 1810 gen6_initialize_pt(vm, vm->scratch_pt); 1811 gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) 1812 ppgtt->base.pd.page_table[pde] = vm->scratch_pt; 1813 1814 return 0; 1815 } 1816 1817 static void gen6_ppgtt_free_scratch(struct i915_address_space *vm) 1818 { 1819 free_pt(vm, vm->scratch_pt); 1820 cleanup_scratch_page(vm); 1821 } 1822 1823 static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt) 1824 { 1825 struct i915_page_table *pt; 1826 u32 pde; 1827 1828 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) 1829 if (pt != ppgtt->base.vm.scratch_pt) 1830 free_pt(&ppgtt->base.vm, pt); 1831 } 1832 1833 static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 1834 { 1835 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); 1836 1837 i915_vma_destroy(ppgtt->vma); 1838 1839 gen6_ppgtt_free_pd(ppgtt); 1840 gen6_ppgtt_free_scratch(vm); 1841 } 1842 1843 static int pd_vma_set_pages(struct i915_vma *vma) 1844 { 1845 vma->pages = ERR_PTR(-ENODEV); 1846 return 0; 1847 } 1848 1849 static void pd_vma_clear_pages(struct i915_vma *vma) 1850 { 1851 GEM_BUG_ON(!vma->pages); 1852 1853 vma->pages = NULL; 1854 } 1855 1856 static int pd_vma_bind(struct i915_vma *vma, 1857 enum i915_cache_level cache_level, 1858 u32 unused) 1859 { 1860 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); 1861 struct gen6_hw_ppgtt *ppgtt = vma->private; 1862 u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; 1863 struct i915_page_table *pt; 1864 unsigned int pde; 1865 1866 ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); 1867 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; 1868 1869 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) 1870 gen6_write_pde(ppgtt, pde, pt); 1871 1872 mark_tlbs_dirty(&ppgtt->base); 1873 gen6_ggtt_invalidate(ppgtt->base.vm.i915); 1874 1875 return 0; 1876 } 1877 1878 static void pd_vma_unbind(struct i915_vma *vma) 1879 { 1880 struct gen6_hw_ppgtt *ppgtt = vma->private; 1881 struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt; 1882 struct i915_page_table *pt; 1883 unsigned int pde; 1884 1885 if (!ppgtt->scan_for_unused_pt) 1886 return; 1887 1888 /* Free all no longer used page tables */ 1889 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) { 1890 if (pt->used_ptes || pt == scratch_pt) 1891 continue; 1892 1893 free_pt(&ppgtt->base.vm, pt); 1894 ppgtt->base.pd.page_table[pde] = scratch_pt; 1895 } 1896 1897 ppgtt->scan_for_unused_pt = false; 1898 } 1899 1900 static const struct i915_vma_ops pd_vma_ops = { 1901 .set_pages = pd_vma_set_pages, 1902 .clear_pages = pd_vma_clear_pages, 1903 .bind_vma = pd_vma_bind, 1904 .unbind_vma = pd_vma_unbind, 1905 }; 1906 1907 static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) 1908 { 1909 struct drm_i915_private *i915 = ppgtt->base.vm.i915; 1910 struct i915_ggtt *ggtt = &i915->ggtt; 1911 struct i915_vma *vma; 1912 1913 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); 1914 GEM_BUG_ON(size > ggtt->vm.total); 1915 1916 vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL); 1917 if (!vma) 1918 return ERR_PTR(-ENOMEM); 1919 1920 i915_active_init(i915, &vma->active, NULL); 1921 INIT_ACTIVE_REQUEST(&vma->last_fence); 1922 1923 vma->vm = &ggtt->vm; 1924 vma->ops = &pd_vma_ops; 1925 
vma->private = ppgtt; 1926 1927 vma->size = size; 1928 vma->fence_size = size; 1929 vma->flags = I915_VMA_GGTT; 1930 vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ 1931 1932 INIT_LIST_HEAD(&vma->obj_link); 1933 1934 mutex_lock(&vma->vm->mutex); 1935 list_add(&vma->vm_link, &vma->vm->unbound_list); 1936 mutex_unlock(&vma->vm->mutex); 1937 1938 return vma; 1939 } 1940 1941 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) 1942 { 1943 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 1944 int err; 1945 1946 /* 1947 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt 1948 * which will be pinned into every active context. 1949 * (When vma->pin_count becomes atomic, I expect we will naturally 1950 * need a larger, unpacked, type and kill this redundancy.) 1951 */ 1952 if (ppgtt->pin_count++) 1953 return 0; 1954 1955 /* 1956 * PPGTT PDEs reside in the GGTT and consists of 512 entries. The 1957 * allocator works in address space sizes, so it's multiplied by page 1958 * size. We allocate at the top of the GTT to avoid fragmentation. 1959 */ 1960 err = i915_vma_pin(ppgtt->vma, 1961 0, GEN6_PD_ALIGN, 1962 PIN_GLOBAL | PIN_HIGH); 1963 if (err) 1964 goto unpin; 1965 1966 return 0; 1967 1968 unpin: 1969 ppgtt->pin_count = 0; 1970 return err; 1971 } 1972 1973 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) 1974 { 1975 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 1976 1977 GEM_BUG_ON(!ppgtt->pin_count); 1978 if (--ppgtt->pin_count) 1979 return; 1980 1981 i915_vma_unpin(ppgtt->vma); 1982 } 1983 1984 static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) 1985 { 1986 struct i915_ggtt * const ggtt = &i915->ggtt; 1987 struct gen6_hw_ppgtt *ppgtt; 1988 int err; 1989 1990 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 1991 if (!ppgtt) 1992 return ERR_PTR(-ENOMEM); 1993 1994 kref_init(&ppgtt->base.ref); 1995 1996 ppgtt->base.vm.i915 = i915; 1997 ppgtt->base.vm.dma = &i915->drm.pdev->dev; 1998 1999 ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE; 2000 2001 i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT); 2002 2003 ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; 2004 ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; 2005 ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; 2006 ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; 2007 2008 ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma; 2009 ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma; 2010 ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages; 2011 ppgtt->base.vm.vma_ops.clear_pages = clear_pages; 2012 2013 ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; 2014 2015 err = gen6_ppgtt_init_scratch(ppgtt); 2016 if (err) 2017 goto err_free; 2018 2019 ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); 2020 if (IS_ERR(ppgtt->vma)) { 2021 err = PTR_ERR(ppgtt->vma); 2022 goto err_scratch; 2023 } 2024 2025 return &ppgtt->base; 2026 2027 err_scratch: 2028 gen6_ppgtt_free_scratch(&ppgtt->base.vm); 2029 err_free: 2030 kfree(ppgtt); 2031 return ERR_PTR(err); 2032 } 2033 2034 static void gtt_write_workarounds(struct drm_i915_private *dev_priv) 2035 { 2036 /* This function is for gtt related workarounds. This function is 2037 * called on driver load and after a GPU reset, so you can place 2038 * workarounds here even if they get overwritten by GPU reset. 
2039 */ 2040 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ 2041 if (IS_BROADWELL(dev_priv)) 2042 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); 2043 else if (IS_CHERRYVIEW(dev_priv)) 2044 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); 2045 else if (IS_GEN9_LP(dev_priv)) 2046 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); 2047 else if (INTEL_GEN(dev_priv) >= 9) 2048 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); 2049 2050 /* 2051 * To support 64K PTEs we need to first enable the use of the 2052 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical 2053 * mmio, otherwise the page-walker will simply ignore the IPS bit. This 2054 * shouldn't be needed after GEN10. 2055 * 2056 * 64K pages were first introduced from BDW+, although technically they 2057 * only *work* from gen9+. For pre-BDW we instead have the option for 2058 * 32K pages, but we don't currently have any support for it in our 2059 * driver. 2060 */ 2061 if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) && 2062 INTEL_GEN(dev_priv) <= 10) 2063 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA, 2064 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) | 2065 GAMW_ECO_ENABLE_64K_IPS_FIELD); 2066 } 2067 2068 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) 2069 { 2070 gtt_write_workarounds(dev_priv); 2071 2072 if (IS_GEN(dev_priv, 6)) 2073 gen6_ppgtt_enable(dev_priv); 2074 else if (IS_GEN(dev_priv, 7)) 2075 gen7_ppgtt_enable(dev_priv); 2076 2077 return 0; 2078 } 2079 2080 static struct i915_hw_ppgtt * 2081 __hw_ppgtt_create(struct drm_i915_private *i915) 2082 { 2083 if (INTEL_GEN(i915) < 8) 2084 return gen6_ppgtt_create(i915); 2085 else 2086 return gen8_ppgtt_create(i915); 2087 } 2088 2089 struct i915_hw_ppgtt * 2090 i915_ppgtt_create(struct drm_i915_private *i915, 2091 struct drm_i915_file_private *fpriv) 2092 { 2093 struct i915_hw_ppgtt *ppgtt; 2094 2095 ppgtt = __hw_ppgtt_create(i915); 2096 if (IS_ERR(ppgtt)) 2097 return ppgtt; 2098 2099 ppgtt->vm.file = fpriv; 2100 2101 trace_i915_ppgtt_create(&ppgtt->vm); 2102 2103 return ppgtt; 2104 } 2105 2106 void i915_ppgtt_close(struct i915_address_space *vm) 2107 { 2108 GEM_BUG_ON(vm->closed); 2109 vm->closed = true; 2110 } 2111 2112 static void ppgtt_destroy_vma(struct i915_address_space *vm) 2113 { 2114 struct list_head *phases[] = { 2115 &vm->bound_list, 2116 &vm->unbound_list, 2117 NULL, 2118 }, **phase; 2119 2120 vm->closed = true; 2121 for (phase = phases; *phase; phase++) { 2122 struct i915_vma *vma, *vn; 2123 2124 list_for_each_entry_safe(vma, vn, *phase, vm_link) 2125 i915_vma_destroy(vma); 2126 } 2127 } 2128 2129 void i915_ppgtt_release(struct kref *kref) 2130 { 2131 struct i915_hw_ppgtt *ppgtt = 2132 container_of(kref, struct i915_hw_ppgtt, ref); 2133 2134 trace_i915_ppgtt_release(&ppgtt->vm); 2135 2136 ppgtt_destroy_vma(&ppgtt->vm); 2137 2138 GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list)); 2139 GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list)); 2140 2141 ppgtt->vm.cleanup(&ppgtt->vm); 2142 i915_address_space_fini(&ppgtt->vm); 2143 kfree(ppgtt); 2144 } 2145 2146 /* Certain Gen5 chipsets require require idling the GPU before 2147 * unmapping anything from the GTT when VT-d is enabled. 2148 */ 2149 static bool needs_idle_maps(struct drm_i915_private *dev_priv) 2150 { 2151 /* Query intel_iommu to see if we need the workaround. Presumably that 2152 * was loaded first. 
2153 */ 2154 return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active(); 2155 } 2156 2157 static void gen6_check_faults(struct drm_i915_private *dev_priv) 2158 { 2159 struct intel_engine_cs *engine; 2160 enum intel_engine_id id; 2161 u32 fault; 2162 2163 for_each_engine(engine, dev_priv, id) { 2164 fault = I915_READ(RING_FAULT_REG(engine)); 2165 if (fault & RING_FAULT_VALID) { 2166 DRM_DEBUG_DRIVER("Unexpected fault\n" 2167 "\tAddr: 0x%08lx\n" 2168 "\tAddress space: %s\n" 2169 "\tSource ID: %d\n" 2170 "\tType: %d\n", 2171 fault & PAGE_MASK, 2172 fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", 2173 RING_FAULT_SRCID(fault), 2174 RING_FAULT_FAULT_TYPE(fault)); 2175 } 2176 } 2177 } 2178 2179 static void gen8_check_faults(struct drm_i915_private *dev_priv) 2180 { 2181 u32 fault = I915_READ(GEN8_RING_FAULT_REG); 2182 2183 if (fault & RING_FAULT_VALID) { 2184 u32 fault_data0, fault_data1; 2185 u64 fault_addr; 2186 2187 fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); 2188 fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); 2189 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | 2190 ((u64)fault_data0 << 12); 2191 2192 DRM_DEBUG_DRIVER("Unexpected fault\n" 2193 "\tAddr: 0x%08x_%08x\n" 2194 "\tAddress space: %s\n" 2195 "\tEngine ID: %d\n" 2196 "\tSource ID: %d\n" 2197 "\tType: %d\n", 2198 upper_32_bits(fault_addr), 2199 lower_32_bits(fault_addr), 2200 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", 2201 GEN8_RING_FAULT_ENGINE_ID(fault), 2202 RING_FAULT_SRCID(fault), 2203 RING_FAULT_FAULT_TYPE(fault)); 2204 } 2205 } 2206 2207 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) 2208 { 2209 /* From GEN8 onwards we only have one 'All Engine Fault Register' */ 2210 if (INTEL_GEN(dev_priv) >= 8) 2211 gen8_check_faults(dev_priv); 2212 else if (INTEL_GEN(dev_priv) >= 6) 2213 gen6_check_faults(dev_priv); 2214 else 2215 return; 2216 2217 i915_clear_error_registers(dev_priv); 2218 } 2219 2220 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) 2221 { 2222 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2223 2224 /* Don't bother messing with faults pre GEN6 as we have little 2225 * documentation supporting that it's a good idea. 2226 */ 2227 if (INTEL_GEN(dev_priv) < 6) 2228 return; 2229 2230 i915_check_and_clear_faults(dev_priv); 2231 2232 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); 2233 2234 i915_ggtt_invalidate(dev_priv); 2235 } 2236 2237 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, 2238 struct sg_table *pages) 2239 { 2240 do { 2241 if (dma_map_sg_attrs(&obj->base.dev->pdev->dev, 2242 pages->sgl, pages->nents, 2243 PCI_DMA_BIDIRECTIONAL, 2244 DMA_ATTR_NO_WARN)) 2245 return 0; 2246 2247 /* 2248 * If the DMA remap fails, one cause can be that we have 2249 * too many objects pinned in a small remapping table, 2250 * such as swiotlb. Incrementally purge all other objects and 2251 * try again - if there are no more pages to remove from 2252 * the DMA remapper, i915_gem_shrink will return 0. 
2253 */ 2254 GEM_BUG_ON(obj->mm.pages == pages); 2255 } while (i915_gem_shrink(to_i915(obj->base.dev), 2256 obj->base.size >> PAGE_SHIFT, NULL, 2257 I915_SHRINK_BOUND | 2258 I915_SHRINK_UNBOUND)); 2259 2260 return -ENOSPC; 2261 } 2262 2263 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) 2264 { 2265 writeq(pte, addr); 2266 } 2267 2268 static void gen8_ggtt_insert_page(struct i915_address_space *vm, 2269 dma_addr_t addr, 2270 u64 offset, 2271 enum i915_cache_level level, 2272 u32 unused) 2273 { 2274 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2275 gen8_pte_t __iomem *pte = 2276 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; 2277 2278 gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); 2279 2280 ggtt->invalidate(vm->i915); 2281 } 2282 2283 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, 2284 struct i915_vma *vma, 2285 enum i915_cache_level level, 2286 u32 flags) 2287 { 2288 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2289 struct sgt_iter sgt_iter; 2290 gen8_pte_t __iomem *gtt_entries; 2291 const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); 2292 dma_addr_t addr; 2293 2294 /* 2295 * Note that we ignore PTE_READ_ONLY here. The caller must be careful 2296 * not to allow the user to override access to a read only page. 2297 */ 2298 2299 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; 2300 gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; 2301 for_each_sgt_dma(addr, sgt_iter, vma->pages) 2302 gen8_set_pte(gtt_entries++, pte_encode | addr); 2303 2304 /* 2305 * We want to flush the TLBs only after we're certain all the PTE 2306 * updates have finished. 2307 */ 2308 ggtt->invalidate(vm->i915); 2309 } 2310 2311 static void gen6_ggtt_insert_page(struct i915_address_space *vm, 2312 dma_addr_t addr, 2313 u64 offset, 2314 enum i915_cache_level level, 2315 u32 flags) 2316 { 2317 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2318 gen6_pte_t __iomem *pte = 2319 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; 2320 2321 iowrite32(vm->pte_encode(addr, level, flags), pte); 2322 2323 ggtt->invalidate(vm->i915); 2324 } 2325 2326 /* 2327 * Binds an object into the global gtt with the specified cache level. The object 2328 * will be accessible to the GPU via commands whose operands reference offsets 2329 * within the global GTT as well as accessible by the GPU through the GMADR 2330 * mapped BAR (dev_priv->mm.gtt->gtt). 2331 */ 2332 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, 2333 struct i915_vma *vma, 2334 enum i915_cache_level level, 2335 u32 flags) 2336 { 2337 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2338 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; 2339 unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; 2340 struct sgt_iter iter; 2341 dma_addr_t addr; 2342 for_each_sgt_dma(addr, iter, vma->pages) 2343 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); 2344 2345 /* 2346 * We want to flush the TLBs only after we're certain all the PTE 2347 * updates have finished. 
*/ 2349 ggtt->invalidate(vm->i915); 2350 } 2351 2352 static void nop_clear_range(struct i915_address_space *vm, 2353 u64 start, u64 length) 2354 { 2355 } 2356 2357 static void gen8_ggtt_clear_range(struct i915_address_space *vm, 2358 u64 start, u64 length) 2359 { 2360 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2361 unsigned first_entry = start / I915_GTT_PAGE_SIZE; 2362 unsigned num_entries = length / I915_GTT_PAGE_SIZE; 2363 const gen8_pte_t scratch_pte = vm->scratch_pte; 2364 gen8_pte_t __iomem *gtt_base = 2365 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; 2366 const int max_entries = ggtt_total_entries(ggtt) - first_entry; 2367 int i; 2368 2369 if (WARN(num_entries > max_entries, 2370 "First entry = %d; Num entries = %d (max=%d)\n", 2371 first_entry, num_entries, max_entries)) 2372 num_entries = max_entries; 2373 2374 for (i = 0; i < num_entries; i++) 2375 gen8_set_pte(&gtt_base[i], scratch_pte); 2376 } 2377 2378 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) 2379 { 2380 struct drm_i915_private *dev_priv = vm->i915; 2381 2382 /* 2383 * Make sure the internal GAM fifo has been cleared of all GTT 2384 * writes before exiting stop_machine(). This guarantees that 2385 * any aperture accesses waiting to start in another process 2386 * cannot back up behind the GTT writes causing a hang. 2387 * The register can be any arbitrary GAM register. 2388 */ 2389 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2390 } 2391 2392 struct insert_page { 2393 struct i915_address_space *vm; 2394 dma_addr_t addr; 2395 u64 offset; 2396 enum i915_cache_level level; 2397 }; 2398 2399 static int bxt_vtd_ggtt_insert_page__cb(void *_arg) 2400 { 2401 struct insert_page *arg = _arg; 2402 2403 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); 2404 bxt_vtd_ggtt_wa(arg->vm); 2405 2406 return 0; 2407 } 2408 2409 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, 2410 dma_addr_t addr, 2411 u64 offset, 2412 enum i915_cache_level level, 2413 u32 unused) 2414 { 2415 struct insert_page arg = { vm, addr, offset, level }; 2416 2417 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); 2418 } 2419 2420 struct insert_entries { 2421 struct i915_address_space *vm; 2422 struct i915_vma *vma; 2423 enum i915_cache_level level; 2424 u32 flags; 2425 }; 2426 2427 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) 2428 { 2429 struct insert_entries *arg = _arg; 2430 2431 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); 2432 bxt_vtd_ggtt_wa(arg->vm); 2433 2434 return 0; 2435 } 2436 2437 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, 2438 struct i915_vma *vma, 2439 enum i915_cache_level level, 2440 u32 flags) 2441 { 2442 struct insert_entries arg = { vm, vma, level, flags }; 2443 2444 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); 2445 } 2446 2447 struct clear_range { 2448 struct i915_address_space *vm; 2449 u64 start; 2450 u64 length; 2451 }; 2452 2453 static int bxt_vtd_ggtt_clear_range__cb(void *_arg) 2454 { 2455 struct clear_range *arg = _arg; 2456 2457 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); 2458 bxt_vtd_ggtt_wa(arg->vm); 2459 2460 return 0; 2461 } 2462 2463 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, 2464 u64 start, 2465 u64 length) 2466 { 2467 struct clear_range arg = { vm, start, length }; 2468 2469 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); 2470 } 2471 2472 static void gen6_ggtt_clear_range(struct i915_address_space *vm, 2473 u64 start, u64 length) 2474 { 2475 
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2476 unsigned first_entry = start / I915_GTT_PAGE_SIZE; 2477 unsigned num_entries = length / I915_GTT_PAGE_SIZE; 2478 gen6_pte_t scratch_pte, __iomem *gtt_base = 2479 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; 2480 const int max_entries = ggtt_total_entries(ggtt) - first_entry; 2481 int i; 2482 2483 if (WARN(num_entries > max_entries, 2484 "First entry = %d; Num entries = %d (max=%d)\n", 2485 first_entry, num_entries, max_entries)) 2486 num_entries = max_entries; 2487 2488 scratch_pte = vm->scratch_pte; 2489 2490 for (i = 0; i < num_entries; i++) 2491 iowrite32(scratch_pte, &gtt_base[i]); 2492 } 2493 2494 static void i915_ggtt_insert_page(struct i915_address_space *vm, 2495 dma_addr_t addr, 2496 u64 offset, 2497 enum i915_cache_level cache_level, 2498 u32 unused) 2499 { 2500 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 2501 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2502 2503 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); 2504 } 2505 2506 static void i915_ggtt_insert_entries(struct i915_address_space *vm, 2507 struct i915_vma *vma, 2508 enum i915_cache_level cache_level, 2509 u32 unused) 2510 { 2511 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 2512 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2513 2514 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, 2515 flags); 2516 } 2517 2518 static void i915_ggtt_clear_range(struct i915_address_space *vm, 2519 u64 start, u64 length) 2520 { 2521 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); 2522 } 2523 2524 static int ggtt_bind_vma(struct i915_vma *vma, 2525 enum i915_cache_level cache_level, 2526 u32 flags) 2527 { 2528 struct drm_i915_private *i915 = vma->vm->i915; 2529 struct drm_i915_gem_object *obj = vma->obj; 2530 intel_wakeref_t wakeref; 2531 u32 pte_flags; 2532 2533 /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ 2534 pte_flags = 0; 2535 if (i915_gem_object_is_readonly(obj)) 2536 pte_flags |= PTE_READ_ONLY; 2537 2538 with_intel_runtime_pm(i915, wakeref) 2539 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); 2540 2541 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 2542 2543 /* 2544 * Without aliasing PPGTT there's no difference between 2545 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally 2546 * upgrade to both bound if we bind either to avoid double-binding. 
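 * (Contrast this with aliasing_gtt_bind_vma() below, where the two flags
 * really do select different page tables and are therefore handled
 * separately.)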
2547 */ 2548 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; 2549 2550 return 0; 2551 } 2552 2553 static void ggtt_unbind_vma(struct i915_vma *vma) 2554 { 2555 struct drm_i915_private *i915 = vma->vm->i915; 2556 intel_wakeref_t wakeref; 2557 2558 with_intel_runtime_pm(i915, wakeref) 2559 vma->vm->clear_range(vma->vm, vma->node.start, vma->size); 2560 } 2561 2562 static int aliasing_gtt_bind_vma(struct i915_vma *vma, 2563 enum i915_cache_level cache_level, 2564 u32 flags) 2565 { 2566 struct drm_i915_private *i915 = vma->vm->i915; 2567 u32 pte_flags; 2568 int ret; 2569 2570 /* Currently applicable only to VLV */ 2571 pte_flags = 0; 2572 if (i915_gem_object_is_readonly(vma->obj)) 2573 pte_flags |= PTE_READ_ONLY; 2574 2575 if (flags & I915_VMA_LOCAL_BIND) { 2576 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; 2577 2578 if (!(vma->flags & I915_VMA_LOCAL_BIND)) { 2579 ret = appgtt->vm.allocate_va_range(&appgtt->vm, 2580 vma->node.start, 2581 vma->size); 2582 if (ret) 2583 return ret; 2584 } 2585 2586 appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level, 2587 pte_flags); 2588 } 2589 2590 if (flags & I915_VMA_GLOBAL_BIND) { 2591 intel_wakeref_t wakeref; 2592 2593 with_intel_runtime_pm(i915, wakeref) { 2594 vma->vm->insert_entries(vma->vm, vma, 2595 cache_level, pte_flags); 2596 } 2597 } 2598 2599 return 0; 2600 } 2601 2602 static void aliasing_gtt_unbind_vma(struct i915_vma *vma) 2603 { 2604 struct drm_i915_private *i915 = vma->vm->i915; 2605 2606 if (vma->flags & I915_VMA_GLOBAL_BIND) { 2607 struct i915_address_space *vm = vma->vm; 2608 intel_wakeref_t wakeref; 2609 2610 with_intel_runtime_pm(i915, wakeref) 2611 vm->clear_range(vm, vma->node.start, vma->size); 2612 } 2613 2614 if (vma->flags & I915_VMA_LOCAL_BIND) { 2615 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm; 2616 2617 vm->clear_range(vm, vma->node.start, vma->size); 2618 } 2619 } 2620 2621 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, 2622 struct sg_table *pages) 2623 { 2624 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2625 struct device *kdev = &dev_priv->drm.pdev->dev; 2626 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2627 2628 if (unlikely(ggtt->do_idle_maps)) { 2629 if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) { 2630 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); 2631 /* Wait a bit, in hopes it avoids the hang */ 2632 udelay(10); 2633 } 2634 } 2635 2636 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); 2637 } 2638 2639 static int ggtt_set_pages(struct i915_vma *vma) 2640 { 2641 int ret; 2642 2643 GEM_BUG_ON(vma->pages); 2644 2645 ret = i915_get_ggtt_vma_pages(vma); 2646 if (ret) 2647 return ret; 2648 2649 vma->page_sizes = vma->obj->mm.page_sizes; 2650 2651 return 0; 2652 } 2653 2654 static void i915_gtt_color_adjust(const struct drm_mm_node *node, 2655 unsigned long color, 2656 u64 *start, 2657 u64 *end) 2658 { 2659 if (node->allocated && node->color != color) 2660 *start += I915_GTT_PAGE_SIZE; 2661 2662 /* Also leave a space between the unallocated reserved node after the 2663 * GTT and any objects within the GTT, i.e. we use the color adjustment 2664 * to insert a guard page to prevent prefetches crossing over the 2665 * GTT boundary. 
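 *
 * As a worked example (illustrative only): a hole bounded on both sides
 * by nodes of a different colour has *start bumped up and *end pulled
 * down by one page each, so the hole must span at least
 * size + 2 * I915_GTT_PAGE_SIZE for an allocation of that size to fit.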
2666 */ 2667 node = list_next_entry(node, node_list); 2668 if (node->color != color) 2669 *end -= I915_GTT_PAGE_SIZE; 2670 } 2671 2672 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) 2673 { 2674 struct i915_ggtt *ggtt = &i915->ggtt; 2675 struct i915_hw_ppgtt *ppgtt; 2676 int err; 2677 2678 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM)); 2679 if (IS_ERR(ppgtt)) 2680 return PTR_ERR(ppgtt); 2681 2682 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { 2683 err = -ENODEV; 2684 goto err_ppgtt; 2685 } 2686 2687 /* 2688 * Note we only pre-allocate as far as the end of the global 2689 * GTT. On 48b / 4-level page-tables, the difference is very, 2690 * very significant! We have to preallocate as GVT/vgpu does 2691 * not like the page directory disappearing. 2692 */ 2693 err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); 2694 if (err) 2695 goto err_ppgtt; 2696 2697 i915->mm.aliasing_ppgtt = ppgtt; 2698 2699 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); 2700 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; 2701 2702 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); 2703 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; 2704 2705 return 0; 2706 2707 err_ppgtt: 2708 i915_ppgtt_put(ppgtt); 2709 return err; 2710 } 2711 2712 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915) 2713 { 2714 struct i915_ggtt *ggtt = &i915->ggtt; 2715 struct i915_hw_ppgtt *ppgtt; 2716 2717 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); 2718 if (!ppgtt) 2719 return; 2720 2721 i915_ppgtt_put(ppgtt); 2722 2723 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; 2724 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; 2725 } 2726 2727 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) 2728 { 2729 /* Let GEM Manage all of the aperture. 2730 * 2731 * However, leave one page at the end still bound to the scratch page. 2732 * There are a number of places where the hardware apparently prefetches 2733 * past the end of the object, and we've seen multiple hangs with the 2734 * GPU head pointer stuck in a batchbuffer bound at the last page of the 2735 * aperture. One page should be enough to keep any prefetching inside 2736 * of the aperture. 2737 */ 2738 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2739 unsigned long hole_start, hole_end; 2740 struct drm_mm_node *entry; 2741 int ret; 2742 2743 /* 2744 * GuC requires all resources that we're sharing with it to be placed in 2745 * non-WOPCM memory. If GuC is not present or not in use we still need a 2746 * small bias as ring wraparound at offset 0 sometimes hangs. No idea 2747 * why. 
2748 */ 2749 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, 2750 intel_guc_reserved_gtt_size(&dev_priv->guc)); 2751 2752 ret = intel_vgt_balloon(dev_priv); 2753 if (ret) 2754 return ret; 2755 2756 /* Reserve a mappable slot for our lockless error capture */ 2757 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, 2758 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 2759 0, ggtt->mappable_end, 2760 DRM_MM_INSERT_LOW); 2761 if (ret) 2762 return ret; 2763 2764 /* Clear any non-preallocated blocks */ 2765 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { 2766 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 2767 hole_start, hole_end); 2768 ggtt->vm.clear_range(&ggtt->vm, hole_start, 2769 hole_end - hole_start); 2770 } 2771 2772 /* And finally clear the reserved guard page */ 2773 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); 2774 2775 if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) { 2776 ret = i915_gem_init_aliasing_ppgtt(dev_priv); 2777 if (ret) 2778 goto err; 2779 } 2780 2781 return 0; 2782 2783 err: 2784 drm_mm_remove_node(&ggtt->error_capture); 2785 return ret; 2786 } 2787 2788 /** 2789 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization 2790 * @dev_priv: i915 device 2791 */ 2792 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) 2793 { 2794 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2795 struct i915_vma *vma, *vn; 2796 struct pagevec *pvec; 2797 2798 ggtt->vm.closed = true; 2799 2800 mutex_lock(&dev_priv->drm.struct_mutex); 2801 i915_gem_fini_aliasing_ppgtt(dev_priv); 2802 2803 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) 2804 WARN_ON(i915_vma_unbind(vma)); 2805 2806 if (drm_mm_node_allocated(&ggtt->error_capture)) 2807 drm_mm_remove_node(&ggtt->error_capture); 2808 2809 if (drm_mm_initialized(&ggtt->vm.mm)) { 2810 intel_vgt_deballoon(dev_priv); 2811 i915_address_space_fini(&ggtt->vm); 2812 } 2813 2814 ggtt->vm.cleanup(&ggtt->vm); 2815 2816 pvec = &dev_priv->mm.wc_stash.pvec; 2817 if (pvec->nr) { 2818 set_pages_array_wb(pvec->pages, pvec->nr); 2819 __pagevec_release(pvec); 2820 } 2821 2822 mutex_unlock(&dev_priv->drm.struct_mutex); 2823 2824 arch_phys_wc_del(ggtt->mtrr); 2825 io_mapping_fini(&ggtt->iomap); 2826 2827 i915_gem_cleanup_stolen(dev_priv); 2828 } 2829 2830 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 2831 { 2832 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; 2833 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; 2834 return snb_gmch_ctl << 20; 2835 } 2836 2837 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) 2838 { 2839 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; 2840 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 2841 if (bdw_gmch_ctl) 2842 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 2843 2844 #ifdef CONFIG_X86_32 2845 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ 2846 if (bdw_gmch_ctl > 4) 2847 bdw_gmch_ctl = 4; 2848 #endif 2849 2850 return bdw_gmch_ctl << 20; 2851 } 2852 2853 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) 2854 { 2855 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; 2856 gmch_ctrl &= SNB_GMCH_GGMS_MASK; 2857 2858 if (gmch_ctrl) 2859 return 1 << (20 + gmch_ctrl); 2860 2861 return 0; 2862 } 2863 2864 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) 2865 { 2866 struct drm_i915_private *dev_priv = ggtt->vm.i915; 2867 struct pci_dev *pdev = dev_priv->drm.pdev; 2868 phys_addr_t phys_addr; 2869 int ret; 2870 2871 /* For Modern GENs the PTEs and register space are split in the BAR */ 2872 phys_addr = pci_resource_start(pdev, 0) + 
pci_resource_len(pdev, 0) / 2; 2873 2874 /* 2875 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range 2876 * will be dropped. For WC mappings in general we have 64 byte burst 2877 * writes when the WC buffer is flushed, so we can't use it, but have to 2878 * resort to an uncached mapping. The WC issue is easily caught by the 2879 * readback check when writing GTT PTE entries. 2880 */ 2881 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) 2882 ggtt->gsm = ioremap_nocache(phys_addr, size); 2883 else 2884 ggtt->gsm = ioremap_wc(phys_addr, size); 2885 if (!ggtt->gsm) { 2886 DRM_ERROR("Failed to map the ggtt page table\n"); 2887 return -ENOMEM; 2888 } 2889 2890 ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); 2891 if (ret) { 2892 DRM_ERROR("Scratch setup failed\n"); 2893 /* iounmap will also get called at remove, but meh */ 2894 iounmap(ggtt->gsm); 2895 return ret; 2896 } 2897 2898 ggtt->vm.scratch_pte = 2899 ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr, 2900 I915_CACHE_NONE, 0); 2901 2902 return 0; 2903 } 2904 2905 static struct intel_ppat_entry * 2906 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value) 2907 { 2908 struct intel_ppat_entry *entry = &ppat->entries[index]; 2909 2910 GEM_BUG_ON(index >= ppat->max_entries); 2911 GEM_BUG_ON(test_bit(index, ppat->used)); 2912 2913 entry->ppat = ppat; 2914 entry->value = value; 2915 kref_init(&entry->ref); 2916 set_bit(index, ppat->used); 2917 set_bit(index, ppat->dirty); 2918 2919 return entry; 2920 } 2921 2922 static void __free_ppat_entry(struct intel_ppat_entry *entry) 2923 { 2924 struct intel_ppat *ppat = entry->ppat; 2925 unsigned int index = entry - ppat->entries; 2926 2927 GEM_BUG_ON(index >= ppat->max_entries); 2928 GEM_BUG_ON(!test_bit(index, ppat->used)); 2929 2930 entry->value = ppat->clear_value; 2931 clear_bit(index, ppat->used); 2932 set_bit(index, ppat->dirty); 2933 } 2934 2935 /** 2936 * intel_ppat_get - get a usable PPAT entry 2937 * @i915: i915 device instance 2938 * @value: the PPAT value required by the caller 2939 * 2940 * The function tries to search if there is an existing PPAT entry which 2941 * matches with the required value. If perfectly matched, the existing PPAT 2942 * entry will be used. If only partially matched, it will try to check if 2943 * there is any available PPAT index. If yes, it will allocate a new PPAT 2944 * index for the required entry and update the HW. If not, the partially 2945 * matched entry will be used. 
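 *
 * A minimal usage sketch (hypothetical caller):
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	... encode the returned entry's index into the PTE/PDE ...
 *	intel_ppat_put(entry);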
2946 */ 2947 const struct intel_ppat_entry * 2948 intel_ppat_get(struct drm_i915_private *i915, u8 value) 2949 { 2950 struct intel_ppat *ppat = &i915->ppat; 2951 struct intel_ppat_entry *entry = NULL; 2952 unsigned int scanned, best_score; 2953 int i; 2954 2955 GEM_BUG_ON(!ppat->max_entries); 2956 2957 scanned = best_score = 0; 2958 for_each_set_bit(i, ppat->used, ppat->max_entries) { 2959 unsigned int score; 2960 2961 score = ppat->match(ppat->entries[i].value, value); 2962 if (score > best_score) { 2963 entry = &ppat->entries[i]; 2964 if (score == INTEL_PPAT_PERFECT_MATCH) { 2965 kref_get(&entry->ref); 2966 return entry; 2967 } 2968 best_score = score; 2969 } 2970 scanned++; 2971 } 2972 2973 if (scanned == ppat->max_entries) { 2974 if (!entry) 2975 return ERR_PTR(-ENOSPC); 2976 2977 kref_get(&entry->ref); 2978 return entry; 2979 } 2980 2981 i = find_first_zero_bit(ppat->used, ppat->max_entries); 2982 entry = __alloc_ppat_entry(ppat, i, value); 2983 ppat->update_hw(i915); 2984 return entry; 2985 } 2986 2987 static void release_ppat(struct kref *kref) 2988 { 2989 struct intel_ppat_entry *entry = 2990 container_of(kref, struct intel_ppat_entry, ref); 2991 struct drm_i915_private *i915 = entry->ppat->i915; 2992 2993 __free_ppat_entry(entry); 2994 entry->ppat->update_hw(i915); 2995 } 2996 2997 /** 2998 * intel_ppat_put - put back the PPAT entry got from intel_ppat_get() 2999 * @entry: an intel PPAT entry 3000 * 3001 * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the 3002 * entry is dynamically allocated, its reference count will be decreased. Once 3003 * the reference count becomes into zero, the PPAT index becomes free again. 3004 */ 3005 void intel_ppat_put(const struct intel_ppat_entry *entry) 3006 { 3007 struct intel_ppat *ppat = entry->ppat; 3008 unsigned int index = entry - ppat->entries; 3009 3010 GEM_BUG_ON(!ppat->max_entries); 3011 3012 kref_put(&ppat->entries[index].ref, release_ppat); 3013 } 3014 3015 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv) 3016 { 3017 struct intel_ppat *ppat = &dev_priv->ppat; 3018 int i; 3019 3020 for_each_set_bit(i, ppat->dirty, ppat->max_entries) { 3021 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value); 3022 clear_bit(i, ppat->dirty); 3023 } 3024 } 3025 3026 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv) 3027 { 3028 struct intel_ppat *ppat = &dev_priv->ppat; 3029 u64 pat = 0; 3030 int i; 3031 3032 for (i = 0; i < ppat->max_entries; i++) 3033 pat |= GEN8_PPAT(i, ppat->entries[i].value); 3034 3035 bitmap_clear(ppat->dirty, 0, ppat->max_entries); 3036 3037 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); 3038 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); 3039 } 3040 3041 static unsigned int bdw_private_pat_match(u8 src, u8 dst) 3042 { 3043 unsigned int score = 0; 3044 enum { 3045 AGE_MATCH = BIT(0), 3046 TC_MATCH = BIT(1), 3047 CA_MATCH = BIT(2), 3048 }; 3049 3050 /* Cache attribute has to be matched. */ 3051 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst)) 3052 return 0; 3053 3054 score |= CA_MATCH; 3055 3056 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst)) 3057 score |= TC_MATCH; 3058 3059 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst)) 3060 score |= AGE_MATCH; 3061 3062 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH)) 3063 return INTEL_PPAT_PERFECT_MATCH; 3064 3065 return score; 3066 } 3067 3068 static unsigned int chv_private_pat_match(u8 src, u8 dst) 3069 { 3070 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ? 
3071 INTEL_PPAT_PERFECT_MATCH : 0; 3072 } 3073 3074 static void cnl_setup_private_ppat(struct intel_ppat *ppat) 3075 { 3076 ppat->max_entries = 8; 3077 ppat->update_hw = cnl_private_pat_update_hw; 3078 ppat->match = bdw_private_pat_match; 3079 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); 3080 3081 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); 3082 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); 3083 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); 3084 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); 3085 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); 3086 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); 3087 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); 3088 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 3089 } 3090 3091 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability 3092 * bits. When using advanced contexts each context stores its own PAT, but 3093 * writing this data shouldn't be harmful even in those cases. */ 3094 static void bdw_setup_private_ppat(struct intel_ppat *ppat) 3095 { 3096 ppat->max_entries = 8; 3097 ppat->update_hw = bdw_private_pat_update_hw; 3098 ppat->match = bdw_private_pat_match; 3099 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); 3100 3101 if (!HAS_PPGTT(ppat->i915)) { 3102 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, 3103 * so RTL will always use the value corresponding to 3104 * pat_sel = 000". 3105 * So let's disable cache for GGTT to avoid screen corruptions. 3106 * MOCS still can be used though. 3107 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work 3108 * before this patch, i.e. the same uncached + snooping access 3109 * like on gen6/7 seems to be in effect. 3110 * - So this just fixes blitter/render access. Again it looks 3111 * like it's not just uncached access, but uncached + snooping. 3112 * So we can still hold onto all our assumptions wrt cpu 3113 * clflushing on LLC machines. 3114 */ 3115 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); 3116 return; 3117 } 3118 3119 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */ 3120 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */ 3121 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */ 3122 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */ 3123 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); 3124 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); 3125 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); 3126 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 3127 } 3128 3129 static void chv_setup_private_ppat(struct intel_ppat *ppat) 3130 { 3131 ppat->max_entries = 8; 3132 ppat->update_hw = bdw_private_pat_update_hw; 3133 ppat->match = chv_private_pat_match; 3134 ppat->clear_value = CHV_PPAT_SNOOP; 3135 3136 /* 3137 * Map WB on BDW to snooped on CHV. 3138 * 3139 * Only the snoop bit has meaning for CHV, the rest is 3140 * ignored. 
3141 * 3142 * The hardware will never snoop for certain types of accesses: 3143 * - CPU GTT (GMADR->GGTT->no snoop->memory) 3144 * - PPGTT page tables 3145 * - some other special cycles 3146 * 3147 * As with BDW, we also need to consider the following for GT accesses: 3148 * "For GGTT, there is NO pat_sel[2:0] from the entry, 3149 * so RTL will always use the value corresponding to 3150 * pat_sel = 000". 3151 * Which means we must set the snoop bit in PAT entry 0 3152 * in order to keep the global status page working. 3153 */ 3154 3155 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP); 3156 __alloc_ppat_entry(ppat, 1, 0); 3157 __alloc_ppat_entry(ppat, 2, 0); 3158 __alloc_ppat_entry(ppat, 3, 0); 3159 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP); 3160 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP); 3161 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP); 3162 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP); 3163 } 3164 3165 static void gen6_gmch_remove(struct i915_address_space *vm) 3166 { 3167 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 3168 3169 iounmap(ggtt->gsm); 3170 cleanup_scratch_page(vm); 3171 } 3172 3173 static void setup_private_pat(struct drm_i915_private *dev_priv) 3174 { 3175 struct intel_ppat *ppat = &dev_priv->ppat; 3176 int i; 3177 3178 ppat->i915 = dev_priv; 3179 3180 if (INTEL_GEN(dev_priv) >= 10) 3181 cnl_setup_private_ppat(ppat); 3182 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) 3183 chv_setup_private_ppat(ppat); 3184 else 3185 bdw_setup_private_ppat(ppat); 3186 3187 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES); 3188 3189 for_each_clear_bit(i, ppat->used, ppat->max_entries) { 3190 ppat->entries[i].value = ppat->clear_value; 3191 ppat->entries[i].ppat = ppat; 3192 set_bit(i, ppat->dirty); 3193 } 3194 3195 ppat->update_hw(dev_priv); 3196 } 3197 3198 static int gen8_gmch_probe(struct i915_ggtt *ggtt) 3199 { 3200 struct drm_i915_private *dev_priv = ggtt->vm.i915; 3201 struct pci_dev *pdev = dev_priv->drm.pdev; 3202 unsigned int size; 3203 u16 snb_gmch_ctl; 3204 int err; 3205 3206 /* TODO: We're not aware of mappable constraints on gen8 yet */ 3207 ggtt->gmadr = 3208 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), 3209 pci_resource_len(pdev, 2)); 3210 ggtt->mappable_end = resource_size(&ggtt->gmadr); 3211 3212 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); 3213 if (!err) 3214 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); 3215 if (err) 3216 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); 3217 3218 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 3219 if (IS_CHERRYVIEW(dev_priv)) 3220 size = chv_get_total_gtt_size(snb_gmch_ctl); 3221 else 3222 size = gen8_get_total_gtt_size(snb_gmch_ctl); 3223 3224 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; 3225 ggtt->vm.cleanup = gen6_gmch_remove; 3226 ggtt->vm.insert_page = gen8_ggtt_insert_page; 3227 ggtt->vm.clear_range = nop_clear_range; 3228 if (intel_scanout_needs_vtd_wa(dev_priv)) 3229 ggtt->vm.clear_range = gen8_ggtt_clear_range; 3230 3231 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; 3232 3233 /* Serialize GTT updates with aperture access on BXT if VT-d is on. 
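 * The bxt_vtd_ggtt_*__BKL() helpers above achieve this by wrapping each
 * PTE update in stop_machine(), so no aperture access can overlap the
 * update and the GAM flush issued by bxt_vtd_ggtt_wa().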
*/ 3234 if (intel_ggtt_update_needs_vtd_wa(dev_priv) || 3235 IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) { 3236 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; 3237 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; 3238 if (ggtt->vm.clear_range != nop_clear_range) 3239 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; 3240 3241 /* Prevent recursively calling stop_machine() and deadlocks. */ 3242 dev_info(dev_priv->drm.dev, 3243 "Disabling error capture for VT-d workaround\n"); 3244 i915_disable_error_state(dev_priv, -ENODEV); 3245 } 3246 3247 ggtt->invalidate = gen6_ggtt_invalidate; 3248 3249 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; 3250 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; 3251 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; 3252 ggtt->vm.vma_ops.clear_pages = clear_pages; 3253 3254 ggtt->vm.pte_encode = gen8_pte_encode; 3255 3256 setup_private_pat(dev_priv); 3257 3258 return ggtt_probe_common(ggtt, size); 3259 } 3260 3261 static int gen6_gmch_probe(struct i915_ggtt *ggtt) 3262 { 3263 struct drm_i915_private *dev_priv = ggtt->vm.i915; 3264 struct pci_dev *pdev = dev_priv->drm.pdev; 3265 unsigned int size; 3266 u16 snb_gmch_ctl; 3267 int err; 3268 3269 ggtt->gmadr = 3270 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), 3271 pci_resource_len(pdev, 2)); 3272 ggtt->mappable_end = resource_size(&ggtt->gmadr); 3273 3274 /* 64/512MB is the current min/max we actually know of, but this is just 3275 * a coarse sanity check. 3276 */ 3277 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { 3278 DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); 3279 return -ENXIO; 3280 } 3281 3282 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); 3283 if (!err) 3284 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); 3285 if (err) 3286 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); 3287 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 3288 3289 size = gen6_get_total_gtt_size(snb_gmch_ctl); 3290 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; 3291 3292 ggtt->vm.clear_range = gen6_ggtt_clear_range; 3293 ggtt->vm.insert_page = gen6_ggtt_insert_page; 3294 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; 3295 ggtt->vm.cleanup = gen6_gmch_remove; 3296 3297 ggtt->invalidate = gen6_ggtt_invalidate; 3298 3299 if (HAS_EDRAM(dev_priv)) 3300 ggtt->vm.pte_encode = iris_pte_encode; 3301 else if (IS_HASWELL(dev_priv)) 3302 ggtt->vm.pte_encode = hsw_pte_encode; 3303 else if (IS_VALLEYVIEW(dev_priv)) 3304 ggtt->vm.pte_encode = byt_pte_encode; 3305 else if (INTEL_GEN(dev_priv) >= 7) 3306 ggtt->vm.pte_encode = ivb_pte_encode; 3307 else 3308 ggtt->vm.pte_encode = snb_pte_encode; 3309 3310 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; 3311 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; 3312 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; 3313 ggtt->vm.vma_ops.clear_pages = clear_pages; 3314 3315 return ggtt_probe_common(ggtt, size); 3316 } 3317 3318 static void i915_gmch_remove(struct i915_address_space *vm) 3319 { 3320 intel_gmch_remove(); 3321 } 3322 3323 static int i915_gmch_probe(struct i915_ggtt *ggtt) 3324 { 3325 struct drm_i915_private *dev_priv = ggtt->vm.i915; 3326 phys_addr_t gmadr_base; 3327 int ret; 3328 3329 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); 3330 if (!ret) { 3331 DRM_ERROR("failed to set up gmch\n"); 3332 return -EIO; 3333 } 3334 3335 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); 3336 3337 ggtt->gmadr = 3338 (struct resource) 
DEFINE_RES_MEM(gmadr_base, 3339 ggtt->mappable_end); 3340 3341 ggtt->do_idle_maps = needs_idle_maps(dev_priv); 3342 ggtt->vm.insert_page = i915_ggtt_insert_page; 3343 ggtt->vm.insert_entries = i915_ggtt_insert_entries; 3344 ggtt->vm.clear_range = i915_ggtt_clear_range; 3345 ggtt->vm.cleanup = i915_gmch_remove; 3346 3347 ggtt->invalidate = gmch_ggtt_invalidate; 3348 3349 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; 3350 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; 3351 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; 3352 ggtt->vm.vma_ops.clear_pages = clear_pages; 3353 3354 if (unlikely(ggtt->do_idle_maps)) 3355 DRM_INFO("applying Ironlake quirks for intel_iommu\n"); 3356 3357 return 0; 3358 } 3359 3360 /** 3361 * i915_ggtt_probe_hw - Probe GGTT hardware location 3362 * @dev_priv: i915 device 3363 */ 3364 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) 3365 { 3366 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3367 int ret; 3368 3369 ggtt->vm.i915 = dev_priv; 3370 ggtt->vm.dma = &dev_priv->drm.pdev->dev; 3371 3372 if (INTEL_GEN(dev_priv) <= 5) 3373 ret = i915_gmch_probe(ggtt); 3374 else if (INTEL_GEN(dev_priv) < 8) 3375 ret = gen6_gmch_probe(ggtt); 3376 else 3377 ret = gen8_gmch_probe(ggtt); 3378 if (ret) 3379 return ret; 3380 3381 /* Trim the GGTT to fit the GuC mappable upper range (when enabled). 3382 * This is easier than doing range restriction on the fly, as we 3383 * currently don't have any bits spare to pass in this upper 3384 * restriction! 3385 */ 3386 if (USES_GUC(dev_priv)) { 3387 ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP); 3388 ggtt->mappable_end = 3389 min_t(u64, ggtt->mappable_end, ggtt->vm.total); 3390 } 3391 3392 if ((ggtt->vm.total - 1) >> 32) { 3393 DRM_ERROR("We never expected a Global GTT with more than 32bits" 3394 " of address space! Found %lldM!\n", 3395 ggtt->vm.total >> 20); 3396 ggtt->vm.total = 1ULL << 32; 3397 ggtt->mappable_end = 3398 min_t(u64, ggtt->mappable_end, ggtt->vm.total); 3399 } 3400 3401 if (ggtt->mappable_end > ggtt->vm.total) { 3402 DRM_ERROR("mappable aperture extends past end of GGTT," 3403 " aperture=%pa, total=%llx\n", 3404 &ggtt->mappable_end, ggtt->vm.total); 3405 ggtt->mappable_end = ggtt->vm.total; 3406 } 3407 3408 /* GMADR is the PCI mmio aperture into the global GTT. */ 3409 DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); 3410 DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); 3411 DRM_DEBUG_DRIVER("DSM size = %lluM\n", 3412 (u64)resource_size(&intel_graphics_stolen_res) >> 20); 3413 if (intel_vtd_active()) 3414 DRM_INFO("VT-d active for gfx access\n"); 3415 3416 return 0; 3417 } 3418 3419 /** 3420 * i915_ggtt_init_hw - Initialize GGTT hardware 3421 * @dev_priv: i915 device 3422 */ 3423 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) 3424 { 3425 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3426 int ret; 3427 3428 stash_init(&dev_priv->mm.wc_stash); 3429 3430 /* Note that we use page colouring to enforce a guard page at the 3431 * end of the address space. This is required as the CS may prefetch 3432 * beyond the end of the batch buffer, across the page boundary, 3433 * and beyond the end of the GTT if we do not provide a guard. 
3434 */ 3435 mutex_lock(&dev_priv->drm.struct_mutex); 3436 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); 3437 3438 ggtt->vm.is_ggtt = true; 3439 3440 /* Only VLV supports read-only GGTT mappings */ 3441 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); 3442 3443 if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv)) 3444 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; 3445 mutex_unlock(&dev_priv->drm.struct_mutex); 3446 3447 if (!io_mapping_init_wc(&dev_priv->ggtt.iomap, 3448 dev_priv->ggtt.gmadr.start, 3449 dev_priv->ggtt.mappable_end)) { 3450 ret = -EIO; 3451 goto out_gtt_cleanup; 3452 } 3453 3454 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); 3455 3456 /* 3457 * Initialise stolen early so that we may reserve preallocated 3458 * objects for the BIOS to KMS transition. 3459 */ 3460 ret = i915_gem_init_stolen(dev_priv); 3461 if (ret) 3462 goto out_gtt_cleanup; 3463 3464 return 0; 3465 3466 out_gtt_cleanup: 3467 ggtt->vm.cleanup(&ggtt->vm); 3468 return ret; 3469 } 3470 3471 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) 3472 { 3473 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt()) 3474 return -EIO; 3475 3476 return 0; 3477 } 3478 3479 void i915_ggtt_enable_guc(struct drm_i915_private *i915) 3480 { 3481 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate); 3482 3483 i915->ggtt.invalidate = guc_ggtt_invalidate; 3484 3485 i915_ggtt_invalidate(i915); 3486 } 3487 3488 void i915_ggtt_disable_guc(struct drm_i915_private *i915) 3489 { 3490 /* XXX Temporary pardon for error unload */ 3491 if (i915->ggtt.invalidate == gen6_ggtt_invalidate) 3492 return; 3493 3494 /* We should only be called after i915_ggtt_enable_guc() */ 3495 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); 3496 3497 i915->ggtt.invalidate = gen6_ggtt_invalidate; 3498 3499 i915_ggtt_invalidate(i915); 3500 } 3501 3502 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) 3503 { 3504 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3505 struct i915_vma *vma, *vn; 3506 3507 i915_check_and_clear_faults(dev_priv); 3508 3509 mutex_lock(&ggtt->vm.mutex); 3510 3511 /* First fill our portion of the GTT with scratch pages */ 3512 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); 3513 ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */ 3514 3515 /* clflush objects bound into the GGTT and rebind them. */ 3516 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { 3517 struct drm_i915_gem_object *obj = vma->obj; 3518 3519 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) 3520 continue; 3521 3522 mutex_unlock(&ggtt->vm.mutex); 3523 3524 if (!i915_vma_unbind(vma)) 3525 goto lock; 3526 3527 WARN_ON(i915_vma_bind(vma, 3528 obj ? 
obj->cache_level : 0, 3529 PIN_UPDATE)); 3530 if (obj) 3531 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); 3532 3533 lock: 3534 mutex_lock(&ggtt->vm.mutex); 3535 } 3536 3537 ggtt->vm.closed = false; 3538 i915_ggtt_invalidate(dev_priv); 3539 3540 mutex_unlock(&ggtt->vm.mutex); 3541 3542 if (INTEL_GEN(dev_priv) >= 8) { 3543 struct intel_ppat *ppat = &dev_priv->ppat; 3544 3545 bitmap_set(ppat->dirty, 0, ppat->max_entries); 3546 dev_priv->ppat.update_hw(dev_priv); 3547 return; 3548 } 3549 } 3550 3551 static struct scatterlist * 3552 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, 3553 unsigned int width, unsigned int height, 3554 unsigned int stride, 3555 struct sg_table *st, struct scatterlist *sg) 3556 { 3557 unsigned int column, row; 3558 unsigned int src_idx; 3559 3560 for (column = 0; column < width; column++) { 3561 src_idx = stride * (height - 1) + column + offset; 3562 for (row = 0; row < height; row++) { 3563 st->nents++; 3564 /* We don't need the pages, but need to initialize 3565 * the entries so the sg list can be happily traversed. 3566 * The only thing we need are DMA addresses. 3567 */ 3568 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); 3569 sg_dma_address(sg) = 3570 i915_gem_object_get_dma_address(obj, src_idx); 3571 sg_dma_len(sg) = I915_GTT_PAGE_SIZE; 3572 sg = sg_next(sg); 3573 src_idx -= stride; 3574 } 3575 } 3576 3577 return sg; 3578 } 3579 3580 static noinline struct sg_table * 3581 intel_rotate_pages(struct intel_rotation_info *rot_info, 3582 struct drm_i915_gem_object *obj) 3583 { 3584 unsigned int size = intel_rotation_info_size(rot_info); 3585 struct sg_table *st; 3586 struct scatterlist *sg; 3587 int ret = -ENOMEM; 3588 int i; 3589 3590 /* Allocate target SG list. */ 3591 st = kmalloc(sizeof(*st), GFP_KERNEL); 3592 if (!st) 3593 goto err_st_alloc; 3594 3595 ret = sg_alloc_table(st, size, GFP_KERNEL); 3596 if (ret) 3597 goto err_sg_alloc; 3598 3599 st->nents = 0; 3600 sg = st->sgl; 3601 3602 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { 3603 sg = rotate_pages(obj, rot_info->plane[i].offset, 3604 rot_info->plane[i].width, rot_info->plane[i].height, 3605 rot_info->plane[i].stride, st, sg); 3606 } 3607 3608 return st; 3609 3610 err_sg_alloc: 3611 kfree(st); 3612 err_st_alloc: 3613 3614 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", 3615 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); 3616 3617 return ERR_PTR(ret); 3618 } 3619 3620 static noinline struct sg_table * 3621 intel_partial_pages(const struct i915_ggtt_view *view, 3622 struct drm_i915_gem_object *obj) 3623 { 3624 struct sg_table *st; 3625 struct scatterlist *sg, *iter; 3626 unsigned int count = view->partial.size; 3627 unsigned int offset; 3628 int ret = -ENOMEM; 3629 3630 st = kmalloc(sizeof(*st), GFP_KERNEL); 3631 if (!st) 3632 goto err_st_alloc; 3633 3634 ret = sg_alloc_table(st, count, GFP_KERNEL); 3635 if (ret) 3636 goto err_sg_alloc; 3637 3638 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); 3639 GEM_BUG_ON(!iter); 3640 3641 sg = st->sgl; 3642 st->nents = 0; 3643 do { 3644 unsigned int len; 3645 3646 len = min(iter->length - (offset << PAGE_SHIFT), 3647 count << PAGE_SHIFT); 3648 sg_set_page(sg, NULL, len, 0); 3649 sg_dma_address(sg) = 3650 sg_dma_address(iter) + (offset << PAGE_SHIFT); 3651 sg_dma_len(sg) = len; 3652 3653 st->nents++; 3654 count -= len >> PAGE_SHIFT; 3655 if (count == 0) { 3656 sg_mark_end(sg); 3657 i915_sg_trim(st); /* Drop any unused tail entries. 
*/ 3658 3659 return st; 3660 } 3661 3662 sg = __sg_next(sg); 3663 iter = __sg_next(iter); 3664 offset = 0; 3665 } while (1); 3666 3667 err_sg_alloc: 3668 kfree(st); 3669 err_st_alloc: 3670 return ERR_PTR(ret); 3671 } 3672 3673 static int 3674 i915_get_ggtt_vma_pages(struct i915_vma *vma) 3675 { 3676 int ret; 3677 3678 /* The vma->pages are only valid within the lifespan of the borrowed 3679 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so 3680 * must be the vma->pages. A simple rule is that vma->pages must only 3681 * be accessed when the obj->mm.pages are pinned. 3682 */ 3683 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); 3684 3685 switch (vma->ggtt_view.type) { 3686 default: 3687 GEM_BUG_ON(vma->ggtt_view.type); 3688 /* fall through */ 3689 case I915_GGTT_VIEW_NORMAL: 3690 vma->pages = vma->obj->mm.pages; 3691 return 0; 3692 3693 case I915_GGTT_VIEW_ROTATED: 3694 vma->pages = 3695 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); 3696 break; 3697 3698 case I915_GGTT_VIEW_PARTIAL: 3699 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); 3700 break; 3701 } 3702 3703 ret = 0; 3704 if (unlikely(IS_ERR(vma->pages))) { 3705 ret = PTR_ERR(vma->pages); 3706 vma->pages = NULL; 3707 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", 3708 vma->ggtt_view.type, ret); 3709 } 3710 return ret; 3711 } 3712 3713 /** 3714 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT) 3715 * @vm: the &struct i915_address_space 3716 * @node: the &struct drm_mm_node (typically i915_vma.node) 3717 * @size: how much space to allocate inside the GTT, 3718 * must be #I915_GTT_PAGE_SIZE aligned 3719 * @offset: where to insert inside the GTT, 3720 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node 3721 * (@offset + @size) must fit within the address space 3722 * @color: color to apply to node, if this node is not from a VMA, 3723 * color must be #I915_COLOR_UNEVICTABLE 3724 * @flags: control search and eviction behaviour 3725 * 3726 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside 3727 * the address space (using @size and @color). If the @node does not fit, it 3728 * tries to evict any overlapping nodes from the GTT, including any 3729 * neighbouring nodes if the colors do not match (to ensure guard pages between 3730 * differing domains). See i915_gem_evict_for_node() for the gory details 3731 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on 3732 * evicting active overlapping objects, and any overlapping node that is pinned 3733 * or marked as unevictable will also result in failure. 3734 * 3735 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if 3736 * asked to wait for eviction and interrupted. 
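 *
 * A minimal sketch of a caller (hypothetical node and placement):
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node, SZ_4K, 0,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *	if (err)
 *		return err;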
*/ 3737 3738 int i915_gem_gtt_reserve(struct i915_address_space *vm, 3739 struct drm_mm_node *node, 3740 u64 size, u64 offset, unsigned long color, 3741 unsigned int flags) 3742 { 3743 int err; 3744 3745 GEM_BUG_ON(!size); 3746 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); 3747 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT)); 3748 GEM_BUG_ON(range_overflows(offset, size, vm->total)); 3749 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); 3750 GEM_BUG_ON(drm_mm_node_allocated(node)); 3751 3752 node->size = size; 3753 node->start = offset; 3754 node->color = color; 3755 3756 err = drm_mm_reserve_node(&vm->mm, node); 3757 if (err != -ENOSPC) 3758 return err; 3759 3760 if (flags & PIN_NOEVICT) 3761 return -ENOSPC; 3762 3763 err = i915_gem_evict_for_node(vm, node, flags); 3764 if (err == 0) 3765 err = drm_mm_reserve_node(&vm->mm, node); 3766 3767 return err; 3768 } 3769 3770 static u64 random_offset(u64 start, u64 end, u64 len, u64 align) 3771 { 3772 u64 range, addr; 3773 3774 GEM_BUG_ON(range_overflows(start, len, end)); 3775 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align)); 3776 3777 range = round_down(end - len, align) - round_up(start, align); 3778 if (range) { 3779 if (sizeof(unsigned long) == sizeof(u64)) { 3780 addr = get_random_long(); 3781 } else { 3782 addr = get_random_int(); 3783 if (range > U32_MAX) { 3784 addr <<= 32; 3785 addr |= get_random_int(); 3786 } 3787 } 3788 div64_u64_rem(addr, range, &addr); 3789 start += addr; 3790 } 3791 3792 return round_up(start, align); 3793 } 3794 3795 /** 3796 * i915_gem_gtt_insert - insert a node into an address_space (GTT) 3797 * @vm: the &struct i915_address_space 3798 * @node: the &struct drm_mm_node (typically i915_vma.node) 3799 * @size: how much space to allocate inside the GTT, 3800 * must be #I915_GTT_PAGE_SIZE aligned 3801 * @alignment: required alignment of starting offset, may be 0 but 3802 * if specified, this must be a power-of-two and at least 3803 * #I915_GTT_MIN_ALIGNMENT 3804 * @color: color to apply to node 3805 * @start: start of any range restriction inside GTT (0 for all), 3806 * must be #I915_GTT_PAGE_SIZE aligned 3807 * @end: end of any range restriction inside GTT (U64_MAX for all), 3808 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX 3809 * @flags: control search and eviction behaviour 3810 * 3811 * i915_gem_gtt_insert() first searches for an available hole into which 3812 * it can insert the node. The hole address is aligned to @alignment and 3813 * its @size must then fit entirely within the [@start, @end] bounds. The 3814 * nodes on either side of the hole must match @color, or else a guard page 3815 * will be inserted between the two nodes (or the node evicted). If no 3816 * suitable hole is found, first a victim is randomly selected and tested 3817 * for eviction, and failing that the LRU list of objects within the GTT 3818 * is scanned to find the first set of replacement nodes to create the hole. 3819 * Those old overlapping nodes are evicted from the GTT (and so must be 3820 * rebound before any future use). Any node that is currently pinned cannot 3821 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently 3822 * active and #PIN_NONBLOCK is specified, that node is also skipped when 3823 * searching for an eviction candidate. See i915_gem_evict_something() for 3824 * the gory details on the eviction algorithm. 3825 * 3826 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if 3827 * asked to wait for eviction and interrupted. 
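 *
 * A minimal sketch of a caller (hypothetical node, searching the upper
 * half of the GTT and preferring high addresses):
 *
 *	err = i915_gem_gtt_insert(&ggtt->vm, &node, SZ_64K, 0,
 *				  I915_COLOR_UNEVICTABLE,
 *				  ggtt->vm.total / 2, ggtt->vm.total,
 *				  PIN_HIGH);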
*/ 3828 3829 int i915_gem_gtt_insert(struct i915_address_space *vm, 3830 struct drm_mm_node *node, 3831 u64 size, u64 alignment, unsigned long color, 3832 u64 start, u64 end, unsigned int flags) 3833 { 3834 enum drm_mm_insert_mode mode; 3835 u64 offset; 3836 int err; 3837 3838 lockdep_assert_held(&vm->i915->drm.struct_mutex); 3839 GEM_BUG_ON(!size); 3840 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); 3841 GEM_BUG_ON(alignment && !is_power_of_2(alignment)); 3842 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT)); 3843 GEM_BUG_ON(start >= end); 3844 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); 3845 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); 3846 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); 3847 GEM_BUG_ON(drm_mm_node_allocated(node)); 3848 3849 if (unlikely(range_overflows(start, size, end))) 3850 return -ENOSPC; 3851 3852 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment))) 3853 return -ENOSPC; 3854 3855 mode = DRM_MM_INSERT_BEST; 3856 if (flags & PIN_HIGH) 3857 mode = DRM_MM_INSERT_HIGHEST; 3858 if (flags & PIN_MAPPABLE) 3859 mode = DRM_MM_INSERT_LOW; 3860 3861 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks, 3862 * so we know that we always have a minimum alignment of 4096. 3863 * The drm_mm range manager is optimised to return results 3864 * with zero alignment, so where possible use the optimal 3865 * path. 3866 */ 3867 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE); 3868 if (alignment <= I915_GTT_MIN_ALIGNMENT) 3869 alignment = 0; 3870 3871 err = drm_mm_insert_node_in_range(&vm->mm, node, 3872 size, alignment, color, 3873 start, end, mode); 3874 if (err != -ENOSPC) 3875 return err; 3876 3877 if (mode & DRM_MM_INSERT_ONCE) { 3878 err = drm_mm_insert_node_in_range(&vm->mm, node, 3879 size, alignment, color, 3880 start, end, 3881 DRM_MM_INSERT_BEST); 3882 if (err != -ENOSPC) 3883 return err; 3884 } 3885 3886 if (flags & PIN_NOEVICT) 3887 return -ENOSPC; 3888 3889 /* No free space, pick a slot at random. 3890 * 3891 * There is a pathological case here using a GTT shared between 3892 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt): 3893 * 3894 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->| 3895 * (64k objects) (448k objects) 3896 * 3897 * Now imagine that the eviction LRU is ordered top-down (just because 3898 * pathology meets real life), and that we need to evict an object to 3899 * make room inside the aperture. The eviction scan then has to walk 3900 * the 448k list before it finds one within range. And now imagine that 3901 * it has to search for a new hole between every byte inside the memcpy, 3902 * for several simultaneous clients. 3903 * 3904 * On a full-ppgtt system, if we have run out of available space, there 3905 * will be lots and lots of objects in the eviction list! Again, 3906 * searching that LRU list may be slow if we are also applying any 3907 * range restrictions (e.g. restriction to low 4GiB) and so, for 3908 * simplicity and similarity between different GTT, try the single 3909 * random replacement first. 
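 *
 * The overall strategy is therefore: try drm_mm_insert_node_in_range()
 * into an existing hole; then hand a single random_offset() guess to
 * i915_gem_gtt_reserve(); and only then fall back to a full
 * i915_gem_evict_something() scan followed by a DRM_MM_INSERT_EVICT
 * insertion.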
3910 */ 3911 offset = random_offset(start, end, 3912 size, alignment ?: I915_GTT_MIN_ALIGNMENT); 3913 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags); 3914 if (err != -ENOSPC) 3915 return err; 3916 3917 /* Randomly selected placement is pinned, do a search */ 3918 err = i915_gem_evict_something(vm, size, alignment, color, 3919 start, end, flags); 3920 if (err) 3921 return err; 3922 3923 return drm_mm_insert_node_in_range(&vm->mm, node, 3924 size, alignment, color, 3925 start, end, DRM_MM_INSERT_EVICT); 3926 } 3927 3928 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 3929 #include "selftests/mock_gtt.c" 3930 #include "selftests/i915_gem_gtt.c" 3931 #endif 3932