Lines Matching full:pages
22 struct sg_table *pages) in __i915_gem_object_set_pages() argument
34 /* Make the pages coherent with the GPU (flushing any swapin). */ in __i915_gem_object_set_pages()
39 drm_clflush_sg(pages); in __i915_gem_object_set_pages()
43 obj->mm.get_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
45 obj->mm.get_dma_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
48 obj->mm.pages = pages; in __i915_gem_object_set_pages()
50 obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl); in __i915_gem_object_set_pages()
58 * 64K or 4K pages, although in practice this will depend on a number of in __i915_gem_object_set_pages()
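The matches above all come from __i915_gem_object_set_pages(), which installs a freshly gathered sg_table on a GEM object: flush the CPU caches so the GPU sees coherent data, restart the cached sg lookup iterators, publish the table, and record which physical page sizes back it. A condensed sketch pieced together only from the matched lines follows; the real function flushes only when the object's cache is dirty and keeps further page-size bookkeeping, and the struct layout and headers are assumed from the i915 driver.

/*
 * Sketch only: condensed from the matched __i915_gem_object_set_pages()
 * lines above. Assumes the usual i915 headers (i915_gem_object.h,
 * i915_scatterlist.h, drm/drm_cache.h); error paths and huge-page
 * bookkeeping are omitted.
 */
static void sketch_set_pages(struct drm_i915_gem_object *obj,
			     struct sg_table *pages)
{
	/* Make the pages coherent with the GPU (flushing any swapin). */
	drm_clflush_sg(pages);

	/* Restart the cached page/dma lookup iterators on the new table. */
	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_pos = pages->sgl;

	/* Publish the table and note which physical page sizes back it. */
	obj->mm.pages = pages;
	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
}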
119 /* Ensure that the associated pages are gathered from the backing storage
122 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
123 * either as a result of memory pressure (reaping pages under the shrinker)
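The kerneldoc fragments above (lines 119-123) describe the pin-pages contract: pinning gathers the pages from the backing storage and keeps them resident until the matching i915_gem_object_unpin_pages(), after which they may again be reaped under memory pressure by the shrinker. A minimal, hypothetical caller assuming only that contract; i915_gem_object_pin_pages_unlocked() and i915_gem_object_unpin_pages() are the real i915 helpers, use_pages() is a placeholder for the caller's own work on obj->mm.pages.

/*
 * Hypothetical caller: only the pin/unpin helpers are real i915 API,
 * use_pages() is a stand-in for whatever the caller actually does.
 */
static int example_with_pinned_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		return err;

	/* Pages are resident and shrinker-proof until we unpin. */
	err = use_pages(obj->mm.pages);

	i915_gem_object_unpin_pages(obj);
	return err;
}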
214 struct sg_table *pages; in __i915_gem_object_unset_pages() local
218 pages = fetch_and_zero(&obj->mm.pages); in __i915_gem_object_unset_pages()
219 if (IS_ERR_OR_NULL(pages)) in __i915_gem_object_unset_pages()
220 return pages; in __i915_gem_object_unset_pages()
238 return pages; in __i915_gem_object_unset_pages()
243 struct sg_table *pages; in __i915_gem_object_put_pages() local
258 pages = __i915_gem_object_unset_pages(obj); in __i915_gem_object_put_pages()
262 * NULL pages. In the future, when we have more asynchronous in __i915_gem_object_put_pages()
266 if (!IS_ERR_OR_NULL(pages)) in __i915_gem_object_put_pages()
267 obj->ops->put_pages(obj, pages); in __i915_gem_object_put_pages()
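The two clusters above cover the release path: __i915_gem_object_unset_pages() detaches the sg_table from obj->mm and returns it (or NULL/ERR_PTR when there is nothing to release), and __i915_gem_object_put_pages() then hands whatever it got back to the backend's put_pages() hook. A condensed sketch of that flow built only from the matched lines; locking, unbinding and the iterator/mapping teardown in between are omitted.

/* Sketch only: condensed from the matched release-path lines above. */
static void sketch_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	/* Detach the table so later lookups see no pages. */
	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return;

	/* Hand the pages back to the backend (shmem, phys, userptr, ...). */
	obj->ops->put_pages(obj, pages);
}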
277 struct page *stack[32], **pages = stack, *page; in i915_gem_object_map_page() local
289 * vmap) to provide virtual mappings of the high pages. in i915_gem_object_map_page()
304 if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl))) in i915_gem_object_map_page()
305 return page_address(sg_page(obj->mm.pages->sgl)); in i915_gem_object_map_page()
315 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); in i915_gem_object_map_page()
316 if (!pages) in i915_gem_object_map_page()
321 for_each_sgt_page(page, iter, obj->mm.pages) in i915_gem_object_map_page()
322 pages[i++] = page; in i915_gem_object_map_page()
323 vaddr = vmap(pages, n_pages, 0, pgprot); in i915_gem_object_map_page()
324 if (pages != stack) in i915_gem_object_map_page()
325 kvfree(pages); in i915_gem_object_map_page()
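The i915_gem_object_map_page() matches show the vmap pattern behind CPU mappings of an object: collect struct page pointers from obj->mm.pages into a small on-stack array, fall back to kvmalloc_array() for larger objects, hand the array to vmap(), then free the temporary array. A hedged re-assembly of that pattern from the matched lines; the single lowmem-page fast path and the pgprot selection are left out, and the i915 sgt iterator helpers are assumed.

/* Sketch only: re-assembled from the matched i915_gem_object_map_page() lines. */
static void *sketch_map_pages(struct drm_i915_gem_object *obj,
			      unsigned long n_pages, pgprot_t pgprot)
{
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	unsigned long i = 0;
	void *vaddr;

	if (n_pages > ARRAY_SIZE(stack)) {
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	/* Gather every backing page in sg_table order. */
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;

	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}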
351 for_each_sgt_daddr(addr, iter, obj->mm.pages) in i915_gem_object_map_pfn()
361 struct page **pages; member
379 struct page **pages; in i915_gem_object_panic_pages() local
383 pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC); in i915_gem_object_panic_pages()
384 if (!pages) in i915_gem_object_panic_pages()
388 for_each_sgt_page(page, iter, obj->mm.pages) in i915_gem_object_panic_pages()
389 pages[i++] = page; in i915_gem_object_panic_pages()
390 return pages; in i915_gem_object_panic_pages()
403 * The scanout buffer pages are not mapped, so for each pixel,
426 kmap_local_page_try_from_panic(panic->pages[panic->page]); in i915_gem_object_panic_page_set_pixel()
445 * Use current vaddr if it exists, or setup a list of pages.
467 panic->pages = i915_gem_object_panic_pages(obj); in i915_gem_object_panic_setup()
468 if (!panic->pages) in i915_gem_object_panic_setup()
481 kfree(panic->pages); in i915_gem_object_panic_finish()
482 panic->pages = NULL; in i915_gem_object_panic_finish()
485 /* get, pin, and map the pages of the object into kernel space */
529 * pages should be allocated and mapped as write-combined only. in i915_gem_object_pin_map()
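The pin_map matches belong to i915_gem_object_pin_map(), which pins the backing pages and returns a kernel virtual mapping of the requested caching type; the fragment above notes that some objects must only ever be mapped write-combined. A hypothetical caller assuming just that behaviour; I915_MAP_WC, i915_gem_object_pin_map_unlocked() and i915_gem_object_unpin_map() are the real interfaces, the memcpy stands in for the caller's writes.

/* Hypothetical caller: only the pin_map/unpin_map interfaces are real i915 API. */
static int example_fill_object_wc(struct drm_i915_gem_object *obj,
				  const void *src, size_t len)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);	/* CPU writes go through the WC mapping */

	i915_gem_object_unpin_map(obj);
	return 0;
}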
632 * We allow removing the mapping from underneath pinned pages! in __i915_gem_object_release_map()
688 * individual pages from this range, cancel updating the in __i915_gem_object_page_iter_get_sg()