Lines matching references to "page" in kernel/dma/direct.c

34 static inline struct page *dma_direct_to_page(struct device *dev,  in dma_direct_to_page()
99 static void __dma_direct_free_pages(struct device *dev, struct page *page, in __dma_direct_free_pages() argument
102 if (swiotlb_free(dev, page, size)) in __dma_direct_free_pages()
104 dma_free_contiguous(dev, page, size); in __dma_direct_free_pages()
107 static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size) in dma_direct_alloc_swiotlb()
109 struct page *page = swiotlb_alloc(dev, size); in dma_direct_alloc_swiotlb() local
111 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in dma_direct_alloc_swiotlb()
112 swiotlb_free(dev, page, size); in dma_direct_alloc_swiotlb()
116 return page; in dma_direct_alloc_swiotlb()
119 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages()
123 struct page *page; in __dma_direct_alloc_pages() local
132 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
133 if (page) { in __dma_direct_alloc_pages()
134 if (dma_coherent_ok(dev, page_to_phys(page), size) && in __dma_direct_alloc_pages()
135 (allow_highmem || !PageHighMem(page))) in __dma_direct_alloc_pages()
136 return page; in __dma_direct_alloc_pages()
138 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
141 while ((page = alloc_pages_node(node, gfp, get_order(size))) in __dma_direct_alloc_pages()
142 && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
143 __free_pages(page, get_order(size)); in __dma_direct_alloc_pages()
155 return page; in __dma_direct_alloc_pages()
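
The matches above show the allocate-then-verify pattern at the heart of __dma_direct_alloc_pages(): try the contiguous (CMA) allocator first, check with dma_coherent_ok() that the device can actually address the result, and fall back to the plain page allocator otherwise. A condensed sketch of that pattern, leaving out the GFP_DMA32/GFP_DMA retry and highmem handling of the real function (sketch_alloc_pages is a made-up name):

    #include <linux/dma-direct.h>   /* dma_coherent_ok() */
    #include <linux/dma-map-ops.h>  /* dma_alloc_contiguous(), dma_free_contiguous() */
    #include <linux/gfp.h>

    static struct page *sketch_alloc_pages(struct device *dev, size_t size,
                    gfp_t gfp)
    {
            struct page *page;

            /* First choice: a CMA-backed contiguous allocation. */
            page = dma_alloc_contiguous(dev, size, gfp);
            if (page) {
                    if (dma_coherent_ok(dev, page_to_phys(page), size))
                            return page;
                    /* The device cannot address this memory; give it back. */
                    dma_free_contiguous(dev, page, size);
            }

            /* Fallback: the normal buddy allocator. */
            page = alloc_pages(gfp, get_order(size));
            if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                    __free_pages(page, get_order(size));
                    page = NULL;    /* the real code retries in ZONE_DMA32/ZONE_DMA */
            }
            return page;
    }
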
170 struct page *page; in dma_direct_alloc_from_pool() local
178 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok); in dma_direct_alloc_from_pool()
179 if (!page) in dma_direct_alloc_from_pool()
181 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_from_pool()
188 struct page *page; in dma_direct_alloc_no_mapping() local
190 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc_no_mapping()
191 if (!page) in dma_direct_alloc_no_mapping()
195 if (!PageHighMem(page)) in dma_direct_alloc_no_mapping()
196 arch_dma_prep_coherent(page, size); in dma_direct_alloc_no_mapping()
199 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_no_mapping()
200 return page; in dma_direct_alloc_no_mapping()
207 struct page *page; in dma_direct_alloc() local
254 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc()
255 if (!page) in dma_direct_alloc()
263 if (PageHighMem(page)) { in dma_direct_alloc()
275 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
278 ret = dma_common_contiguous_remap(page, size, prot, in dma_direct_alloc()
283 ret = page_address(page); in dma_direct_alloc()
291 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
297 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc()
301 if (dma_set_encrypted(dev, page_address(page), size)) in dma_direct_alloc()
304 __dma_direct_free_pages(dev, page, size); in dma_direct_alloc()
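
dma_direct_alloc() is the backend that dma_alloc_coherent() reaches on systems using direct mapping (no IOMMU and no custom dma_map_ops). A hedged driver-side sketch of how the API implemented by these matches is consumed (example_setup is a hypothetical name):

    #include <linux/dma-mapping.h>

    static int example_setup(struct device *dev, size_t size)
    {
            dma_addr_t dma_handle;
            void *cpu_addr;

            /* On a direct-mapped system this lands in dma_direct_alloc()
             * above, which allocates via __dma_direct_alloc_pages(). */
            cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
            if (!cpu_addr)
                    return -ENOMEM;

            /* ... program dma_handle into the device, use cpu_addr ... */

            dma_free_coherent(dev, size, cpu_addr, dma_handle);
            return 0;
    }
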
353 struct page *dma_direct_alloc_pages(struct device *dev, size_t size, in dma_direct_alloc_pages()
356 struct page *page; in dma_direct_alloc_pages() local
362 page = __dma_direct_alloc_pages(dev, size, gfp, false); in dma_direct_alloc_pages()
363 if (!page) in dma_direct_alloc_pages()
366 ret = page_address(page); in dma_direct_alloc_pages()
370 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_pages()
371 return page; in dma_direct_alloc_pages()
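
dma_direct_alloc_pages() in turn backs the dma_alloc_pages() API, which hands back a struct page rather than a kernel virtual address. A minimal usage sketch under the same direct-mapping assumption (example_alloc_pages is a hypothetical name; the buffer would later be released with dma_free_pages()):

    #include <linux/dma-mapping.h>

    static struct page *example_alloc_pages(struct device *dev, size_t size,
                    dma_addr_t *dma_handle)
    {
            /* Funnels into dma_direct_alloc_pages() above on the
             * direct-mapping path. */
            return dma_alloc_pages(dev, size, dma_handle, DMA_BIDIRECTIONAL,
                            GFP_KERNEL);
    }
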
377 struct page *page, dma_addr_t dma_addr, in dma_direct_free_pages() argument
380 void *vaddr = page_address(page); in dma_direct_free_pages()
389 __dma_direct_free_pages(dev, page, size); in dma_direct_free_pages()
505 struct page *page = dma_direct_to_page(dev, dma_addr); in dma_direct_get_sgtable() local
510 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_direct_get_sgtable()
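
Finally, the dma_direct_get_sgtable() matches show how a coherent buffer, which is always physically contiguous on this path, is exported as a single-entry scatterlist. A condensed sketch of that step (sketch_get_sgtable is a made-up name):

    #include <linux/mm.h>           /* PAGE_ALIGN() */
    #include <linux/scatterlist.h>

    static int sketch_get_sgtable(struct sg_table *sgt, struct page *page,
                    size_t size)
    {
            int ret;

            /* One physically contiguous buffer maps to one sg entry. */
            ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
            if (!ret)
                    sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
            return ret;
    }
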