Lines matching refs:page, grouped by containing function

linux_page_address():
     86  linux_page_address(const struct page *page)            (page: function argument)
     89      if (page->object != kernel_object) {
     91          ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
     95          IDX_TO_OFF(page->pindex)));
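
Taken together, the matches sketch the usual LinuxKPI page_address() shape: a page that does not belong to kernel_object resolves through the direct map, while a kernel_object page's address is computed from its pindex. A minimal reconstruction follows; the PMAP_HAS_DMAP guard and the VM_MIN_KERNEL_ADDRESS base are assumptions filled in from common FreeBSD conventions, not shown in the listing:

    void *
    linux_page_address_sketch(const struct page *page)
    {
        /* Pages outside kernel_object: use the direct map if present. */
        if (page->object != kernel_object) {
            return (PMAP_HAS_DMAP ?
                ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
                NULL);
        }
        /* kernel_object pages sit at a fixed offset from the map base. */
        return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
            IDX_TO_OFF(page->pindex)));
    }
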
linux_alloc_pages():
     98  struct page *
    101      struct page *page;                                  (page: local variable)
    110      page = vm_page_alloc_noobj(req);
    111      if (page == NULL)
    117      page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
    119      if (page == NULL) {
    141      page = virt_to_page((void *)vaddr);
    143      KASSERT(vaddr == (vm_offset_t)page_address(page),
    147      return (page);
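
The allocator uses vm_page_alloc_noobj() for a single page and vm_page_alloc_noobj_contig() for higher orders, and on the fallback path it recovers the struct page from the freshly mapped address with virt_to_page() (line 141), asserting the mapping round-trips (line 143). Drivers see none of this; they go through the Linux-style API. A hypothetical caller (the my_* names are illustrative only):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void *
    my_grab_buffer(struct page **pp)
    {
        struct page *page;

        /* order 2 -> four physically contiguous pages; may sleep. */
        page = alloc_pages(GFP_KERNEL, 2);
        if (page == NULL)
            return (NULL);
        *pp = page;
        /* Kernel virtual address of the first page. */
        return (page_address(page));
    }
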
linux_free_pages():
    159  linux_free_pages(struct page *page, unsigned int order)    (page: function argument)
    166      vm_page_t pgo = page + x;
    190      vaddr = (vm_offset_t)page_address(page);
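
Line 166's pointer arithmetic (page + x) shows that the free path walks all 1 << order vm_page structures of the allocation, while line 190 is the fallback that first recovers the mapped address. A sketch of the per-page walk, assuming the common unwire-then-free idiom (the vm_page_unwire_noq() call is an assumption, not visible in the listing):

    /* Release the 1 << order pages starting at "page". */
    for (x = 0; x != (1UL << order); x++) {
        vm_page_t pgo = page + x;

        /* Drop our wiring; free once the last wiring is gone. */
        if (vm_page_unwire_noq(pgo))
            vm_page_free(pgo);
    }
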
linux_release_pages():
    201  CTASSERT(offsetof(struct folio, page) == 0);
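
The compile-time assertion pins down the layout rule that makes folio/page conversion free: struct page must be the first member of struct folio so the two pointers alias. A sketch of the invariant being protected, assuming LinuxKPI's struct folio simply wraps a struct page:

    struct folio {
        struct page page;   /* must stay at offset 0 */
        /* ... */
    };

    /* Correct only while offsetof(struct folio, page) == 0. */
    static inline struct page *
    my_folio_page(struct folio *folio)
    {
        return (&folio->page);
    }
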
linux_free_kmem():
    229      vm_page_t page;                                     (page: local variable)
    231      page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
    232      linux_free_pages(page, order);
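
This is the inverse of the direct-map shortcut in linux_page_address(): a direct-map kernel virtual address converts back to a physical address and from there to its vm_page, which can then be fed to linux_free_pages(). As a standalone helper (hypothetical name; valid only for direct-map addresses):

    static vm_page_t
    my_kva_to_page(vm_offset_t addr)
    {
        return (PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr)));
    }
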
linux_get_user_pages_internal():
    238      int write, struct page **pages)
__get_user_pages_fast():
    252      struct page **pages)
get_user_pages_remote():
    296      struct page **pages, struct vm_area_struct **vmas)
lkpi_get_user_pages():
    307      unsigned int gup_flags, struct page **pages)
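
These four signatures are the get_user_pages() family, which pins user-space pages and hands back their struct page pointers. A hypothetical caller that pins one user page for device I/O, assuming the classic Linux __get_user_pages_fast() contract (start, nr_pages, write, pages), which the int write argument on line 238 suggests this shim follows:

    #include <linux/mm.h>
    #include <linux/errno.h>

    static int
    my_pin_user_page(unsigned long uaddr, struct page **pagep)
    {
        int n;

        /* write = 1: the caller intends to dirty the page. */
        n = __get_user_pages_fast(uaddr & PAGE_MASK, 1, 1, pagep);
        if (n != 1)
            return (-EFAULT);
        /* ... do I/O against *pagep ... */
        put_page(*pagep);   /* drop the reference gup took */
        return (0);
    }
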
lkpi_vmf_insert_pfn_prot_locked():
    329      vm_page_t page;                                     (page: local variable)
    340      page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
    341      if (page == NULL) {
    342          page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
    343          if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
    347          if (page->object != NULL) {
    348              tmp_obj = page->object;
    349              vm_page_xunbusy(page);
    352              if (page->object == tmp_obj &&
    353                  vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
    354                  KASSERT(page->object == tmp_obj,
    356                  KASSERT((page->oflags & VPO_UNMANAGED) == 0,
    358                  vm_pager_page_unswapped(page);
    359                  if (pmap_page_is_mapped(page)) {
    360                      vm_page_xunbusy(page);
    367              vm_page_remove(page);
    374      if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
    375          vm_page_xunbusy(page);
    378      vm_page_valid(page);
    380      pmap_page_set_memattr(page, pgprot2cachemode(prot));
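
The busy-lock handshake above exists because the pfn being inserted may name a page that still belongs to another VM object: the helper looks for an existing page at the faulting pindex (line 340), otherwise takes the pfn's vm_page, busies it, evicts it from its previous object (vm_page_remove(), line 367), inserts it at the new pindex, marks it valid, and applies the caching attributes derived from prot (line 380). Drivers reach this through the Linux-side wrapper; a hypothetical fault handler, with a driver-specific pfn computation:

    #include <linux/mm.h>

    static unsigned long my_bar_base_pfn;   /* hypothetical: set at probe */

    static vm_fault_t
    my_drv_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long pfn;

        /* Translate the faulting offset into a device page frame. */
        pfn = my_bar_base_pfn + vma->vm_pgoff +
            ((vmf->address - vma->vm_start) >> PAGE_SHIFT);

        return (vmf_insert_pfn_prot(vma, vmf->address, pfn,
            vma->vm_page_prot));
    }
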
linuxkpi_page_frag_free():
    559      vm_page_t page;                                     (page: local variable)
    561      page = virt_to_page(addr);
    562      linux_free_pages(page, 0);
linuxkpi__page_frag_cache_drain():
    566  linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)    (page: function argument)
    569      linux_free_pages(page, 0);
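
Both frag-cache paths end in linux_free_pages() with order 0, so each fragment's backing page is released like any other single page once its last user lets go. A caller-side sketch of the round trip, assuming the Linux page_frag_alloc()/page_frag_free() pairing these shims implement:

    #include <linux/gfp.h>

    static void
    my_frag_roundtrip(struct page_frag_cache *nc)
    {
        void *buf;

        buf = page_frag_alloc(nc, 256, GFP_ATOMIC);
        if (buf == NULL)
            return;
        /* ... fill the 256-byte fragment ... */
        page_frag_free(buf);    /* final free releases the backing page */
    }
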