Lines Matching defs:pages

24 				 struct sg_table *pages)
36 /* Make the pages coherent with the GPU (flushing any swapin). */
41 drm_clflush_sg(pages);
45 obj->mm.get_page.sg_pos = pages->sgl;
47 obj->mm.get_dma_page.sg_pos = pages->sgl;
50 obj->mm.pages = pages;
52 obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
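
These first matches are the tail of __i915_gem_object_set_pages() in i915's GEM page-management code (i915_gem_pages.c): once a backend has produced an sg_table, the object flushes CPU caches if they are dirty (drm_clflush_sg, line 41), resets both cached lookup cursors to the head of the list (lines 45-47), takes ownership of the table (line 50), and records which physical page sizes the list actually contains (line 52). A minimal sketch of that wiring, with a pared-down stand-in for drm_i915_gem_object (the mm.* field names mirror the matches above; everything else here is an assumption):

	#include <linux/scatterlist.h>
	#include <drm/drm_cache.h>	/* drm_clflush_sg() */
	#include "i915_scatterlist.h"	/* i915_sg_dma_sizes(), driver-internal */

	/* Pared-down stand-in for drm_i915_gem_object; the real struct is far larger. */
	struct toy_obj {
		struct {
			struct sg_table *pages;
			struct { struct scatterlist *sg_pos; unsigned int sg_idx; } get_page;
			struct { struct scatterlist *sg_pos; unsigned int sg_idx; } get_dma_page;
			struct { unsigned int phys; } page_sizes;
		} mm;
		bool cache_dirty;
	};

	static void toy_set_pages(struct toy_obj *obj, struct sg_table *pages)
	{
		/* Make the pages coherent with the GPU (flushing any swapin). */
		if (obj->cache_dirty) {
			drm_clflush_sg(pages);
			obj->cache_dirty = false;
		}

		/* Reset both cached lookup cursors to the head of the list. */
		obj->mm.get_page.sg_pos = pages->sgl;
		obj->mm.get_page.sg_idx = 0;
		obj->mm.get_dma_page.sg_pos = pages->sgl;
		obj->mm.get_dma_page.sg_idx = 0;

		obj->mm.pages = pages;

		/* Bitmask of DMA segment sizes actually present (4K, 64K, 2M, ...). */
		obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
	}
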
60 * 64K or 4K pages, although in practice this will depend on a number of
121 /* Ensure that the associated pages are gathered from the backing storage
124 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
125 * either as a result of memory pressure (reaping pages under the shrinker)
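
Lines 121-125 are the kernel-doc contract for i915_gem_object_pin_pages(): pinning is counted, so the pages may be pinned many times, and they only become reapable by the shrinker (or freed with the object) once every pin has been dropped through i915_gem_object_unpin_pages(). A hedged usage sketch; the pin/unpin calls are the real API, the surrounding function is invented, and on newer kernels the plain pin variant asserts the object ww lock is held, which is why the _unlocked variant is used here:

	#include "i915_gem_object.h"	/* driver-internal header */

	/* Hypothetical caller: materialise the backing store, use it, release it. */
	static int use_backing_store(struct drm_i915_gem_object *obj)
	{
		int err;

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err)
			return err;

		/* obj->mm.pages is valid here and cannot be reaped by the shrinker. */

		i915_gem_object_unpin_pages(obj);	/* pages become shrinkable again */
		return 0;
	}
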
216 struct sg_table *pages;
220 pages = fetch_and_zero(&obj->mm.pages);
221 if (IS_ERR_OR_NULL(pages))
222 return pages;
240 return pages;
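
Lines 216-240 are __i915_gem_object_unset_pages(), which transfers ownership of the sg_table out of the object: fetch_and_zero() swaps obj->mm.pages for NULL and returns the old value, and IS_ERR_OR_NULL covers both the "never populated" (NULL) and "population failed" (ERR_PTR) encodings. fetch_and_zero is a small i915 utility macro, shown here because the function is essentially this idiom plus bookkeeping:

	/* From drivers/gpu/drm/i915/i915_utils.h: read, clear, return old value. */
	#define fetch_and_zero(ptr) ({						\
		typeof(*ptr) __T = *(ptr);					\
		*(ptr) = (typeof(*ptr))0;					\
		__T;								\
	})

The swap is not atomic on its own; callers rely on holding the object's pages lock, which is why plain loads and stores suffice.
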
245 struct sg_table *pages;
260 pages = __i915_gem_object_unset_pages(obj);
264 * NULL pages. In the future, when we have more asynchronous
268 if (!IS_ERR_OR_NULL(pages))
269 obj->ops->put_pages(obj, pages);
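
Lines 245-269 (__i915_gem_object_put_pages) show how the two halves compose: detach the table with the helper above, then hand any real table to the backend's put_pages() vfunc, skipping the NULL/ERR_PTR cases the comment at 264 apologises for. Stripped of locking and vmap teardown, the shape is roughly (a sketch, not the driver's exact code):

	#include "i915_gem_object.h"	/* driver-internal header */

	static int toy_put_pages(struct drm_i915_gem_object *obj)
	{
		struct sg_table *pages;

		pages = __i915_gem_object_unset_pages(obj);

		/* Backends only understand real tables; filter NULL and ERR_PTR. */
		if (!IS_ERR_OR_NULL(pages))
			obj->ops->put_pages(obj, pages);

		return 0;
	}
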
279 struct page *stack[32], **pages = stack, *page;
291 * vmap) to provide virtual mappings of the high pages.
306 if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
307 return page_address(sg_page(obj->mm.pages->sgl));
317 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
318 if (!pages)
323 for_each_sgt_page(page, iter, obj->mm.pages)
324 pages[i++] = page;
325 vaddr = vmap(pages, n_pages, 0, pgprot);
326 if (pages != stack)
327 kvfree(pages);
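
Lines 279-327 implement a classic mapping pattern: gather struct page pointers into a small on-stack array when the object is small, fall back to kvmalloc_array() for larger objects, vmap() the lot into one contiguous kernel virtual range, then free the temporary array either way; the fast path at 306-307 skips vmap entirely for a single lowmem page, which already has a kernel address. A sketch of the pattern against a plain sg_table, using the generic scatterlist iterators rather than i915's for_each_sgt_page (function name assumed):

	#include <linux/scatterlist.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Map every page of @sgt into one contiguous kernel virtual range. */
	static void *map_sg_table(struct sg_table *sgt, unsigned long n_pages)
	{
		struct page *stack[32], **pages = stack;
		struct sg_page_iter iter;
		unsigned long i = 0;
		void *vaddr;

		/* Fast path: one lowmem page already has a kernel address. */
		if (n_pages == 1 && !PageHighMem(sg_page(sgt->sgl)))
			return page_address(sg_page(sgt->sgl));

		/* Too big for the stack? Use a temporary heap array instead. */
		if (n_pages > ARRAY_SIZE(stack)) {
			pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
			if (!pages)
				return NULL;
		}

		for_each_sgtable_page(sgt, &iter, 0)
			pages[i++] = sg_page_iter_page(&iter);

		vaddr = vmap(pages, n_pages, 0, PAGE_KERNEL);
		if (pages != stack)
			kvfree(pages);

		return vaddr;	/* release later with vunmap() */
	}
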
353 for_each_sgt_daddr(addr, iter, obj->mm.pages)
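
Line 353 is the device-address twin of the page loop above: for_each_sgt_daddr() walks DMA addresses rather than struct pages, for objects that are mapped by PFN instead of through a page array. With the generic scatterlist iterators the same walk looks like this (sketch; the loop body is a placeholder):

	#include <linux/scatterlist.h>

	/* Walk the DMA addresses of an already dma_map_sgtable()'d table. */
	static void walk_dma_addresses(struct sg_table *sgt)
	{
		struct sg_dma_page_iter diter;

		for_each_sgtable_dma_page(sgt, &diter, 0) {
			dma_addr_t addr = sg_page_iter_dma_address(&diter);

			(void)addr;	/* e.g. build PTEs for this page */
		}
	}
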
363 struct page **pages;
381 struct page **pages;
385 pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC);
386 if (!pages)
390 for_each_sgt_page(page, iter, obj->mm.pages)
391 pages[i++] = page;
392 return pages;
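
Lines 363-392 (the panic-pages helper) repeat the gather loop from the mapping path, but with kmalloc_array(..., GFP_ATOMIC): the panic handler cannot sleep, so neither kvmalloc_array() nor anything else that might block is allowed. The pattern in isolation, again with the generic iterators and an assumed function name:

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Gather page pointers without sleeping, for atomic/panic context. */
	static struct page **gather_pages_atomic(struct sg_table *sgt,
						 unsigned long n_pages)
	{
		struct page **pages;
		struct sg_page_iter iter;
		unsigned long i = 0;

		pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC);
		if (!pages)
			return NULL;

		for_each_sgtable_page(sgt, &iter, 0)
			pages[i++] = sg_page_iter_page(&iter);

		return pages;
	}
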
405 * The scanout buffer pages are not mapped, so for each pixel,
428 kmap_local_page_try_from_panic(panic->pages[panic->page]);
447 * Use current vaddr if it exists, or setup a list of pages.
469 panic->pages = i915_gem_object_panic_pages(obj);
470 if (!panic->pages)
483 kfree(panic->pages);
484 panic->pages = NULL;
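
Lines 405-484 are the panic scanout path: the framebuffer pages are never vmapped, so the pixel writer maps one page at a time with kmap_local_page_try_from_panic() (line 428) and keeps the current mapping cached to avoid a map/unmap per pixel, then frees the page array on teardown (483-484). A speculative sketch of that caching; the struct and function names are invented, only the try_from_panic call is taken from the match:

	#include <linux/highmem.h>
	#include <linux/mm.h>

	/* Hypothetical pixel writer: map lazily, cache the last mapping. */
	struct panic_fb {
		struct page **pages;	/* from the GFP_ATOMIC gather above */
		int page;		/* index of the currently mapped page */
		void *vaddr;		/* its mapping, or NULL */
	};

	static void panic_write_pixel(struct panic_fb *fb, unsigned int offset, u32 color)
	{
		int page = offset >> PAGE_SHIFT;

		if (!fb->vaddr || fb->page != page) {
			if (fb->vaddr)
				kunmap_local(fb->vaddr);
			fb->page = page;
			/* Panic-safe kmap: may fail instead of deadlocking. */
			fb->vaddr = kmap_local_page_try_from_panic(fb->pages[page]);
		}
		if (fb->vaddr)
			*(u32 *)(fb->vaddr + offset_in_page(offset)) = color;
	}
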
487 /* get, pin, and map the pages of the object into kernel space */
531 * pages should be allocated and mapped as write-combined only.
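
Lines 487-531 belong to i915_gem_object_pin_map(), the "get, pin, and map" one-shot built on the pieces above; the fragment at 531 is the rule that objects which may live in device-local memory must only be mapped write-combined (I915_MAP_WC), never cached. A hedged usage sketch with the real entry points (the _unlocked variant handles the object lock; the surrounding function is invented):

	#include <linux/err.h>
	#include "i915_gem_object.h"	/* pin_map API, enum i915_map_type */

	static int cpu_clear_object(struct drm_i915_gem_object *obj)
	{
		void *vaddr;

		/* One call: acquire the pages, pin them, return a kernel mapping. */
		vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		memset(vaddr, 0, obj->base.size);	/* CPU writes go through WC */

		i915_gem_object_unpin_map(obj);	/* drops the pin; mapping stays cached */
		return 0;
	}
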
634 * We allow removing the mapping from underneath pinned pages!
690 * individual pages from this range, cancel updating the
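
The last two matches concern that cached mapping and the cached sg cursor: 634 (in the release-map path) notes that the driver tolerates dropping the mapping even while pages remain pinned, and 690 sits in the sg lookup behind obj->mm.get_page, where an awkward mapping makes the helper cancel updating its cached cursor rather than cache something wrong. A toy version of such a cursor-cached lookup, heavily simplified (no rewind to sgl, no radix-tree fallback, no locking, all of which the real helper has):

	#include <linux/scatterlist.h>
	#include <linux/mm.h>

	struct sg_cursor {
		struct scatterlist *sg_pos;	/* entry containing page sg_idx */
		unsigned int sg_idx;		/* first page index inside sg_pos */
	};

	/* Find page @n, resuming the linear scan from the cached cursor. */
	static struct page *lookup_page(struct sg_cursor *cur, unsigned int n)
	{
		if (n < cur->sg_idx)
			return NULL;	/* real code rewinds to sgt->sgl instead */

		while (cur->sg_pos &&
		       n >= cur->sg_idx + (cur->sg_pos->length >> PAGE_SHIFT)) {
			cur->sg_idx += cur->sg_pos->length >> PAGE_SHIFT;
			cur->sg_pos = sg_next(cur->sg_pos);
		}
		if (!cur->sg_pos)
			return NULL;

		return nth_page(sg_page(cur->sg_pos), n - cur->sg_idx);
	}
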