Lines matching full:page (FreeBSD LinuxKPI page-management routines)

linux_page_address():
    86  linux_page_address(const struct page *page)
    89      if (page->object != kernel_object) {
    91          ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
    95          IDX_TO_OFF(page->pindex)));
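
The fragment above is the LinuxKPI backend for Linux's page_address(): pages
that do not belong to kernel_object resolve through the direct map (DMAP),
while kmem-backed pages are computed from their pindex. A minimal
consumer-side sketch, assuming only the standard LinuxKPI wrappers
alloc_page(), page_address() and __free_page(); the flags and error handling
are illustrative:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    static int
    scratch_page_demo(void)
    {
            struct page *p;
            void *va;

            p = alloc_page(GFP_KERNEL);     /* order-0 allocation */
            if (p == NULL)
                    return (-ENOMEM);

            va = page_address(p);           /* resolved as shown above */
            memset(va, 0, PAGE_SIZE);

            __free_page(p);
            return (0);
    }
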
linux_alloc_pages():
    98  struct page *
   101      struct page *page;
   111          page = vm_page_alloc_noobj(req);
   112          if (page == NULL)
   122          page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
   124          if (page == NULL) {
   146      page = virt_to_page((void *)vaddr);
   148      KASSERT(vaddr == (vm_offset_t)page_address(page),
   149          ("Page address mismatch"));
   152      return (page);
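
The two allocation call sites above split on the request size: an order-0
request takes the plain no-object path, while larger orders must obtain a
physically contiguous run. A hedged reconstruction of that split, reusing
req, npages and pmax from the matched lines; the alignment, boundary and
memory-attribute arguments are assumptions, not the verbatim source:

    #include <sys/param.h>
    #include <vm/vm.h>
    #include <vm/vm_page.h>

    static vm_page_t
    alloc_pages_sketch(int req, unsigned int order, vm_paddr_t pmax)
    {
            unsigned long npages = 1UL << order;

            if (order == 0)
                    return (vm_page_alloc_noobj(req));

            /* Contiguous, PAGE_SIZE-aligned run below pmax. */
            return (vm_page_alloc_noobj_contig(req, npages, 0, pmax,
                PAGE_SIZE, 0, VM_MEMATTR_DEFAULT));
    }
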
linux_free_pages():
   164  linux_free_pages(struct page *page, unsigned int order)
   171          vm_page_t pgo = page + x;
   174          * The "free page" function is used in several
   182          * That's why we need to check if the page is managed
   195      vaddr = (vm_offset_t)page_address(page);
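
The truncated comment explains why this path cannot assume it allocated the
pages itself: callers also hand in managed, object-backed pages (for example
shmem pages from the DRM drivers). A sketch of the per-page check it alludes
to; the exact unwire and queue choices are assumptions, not the verbatim
source:

    static void
    free_pages_sketch(vm_page_t page, unsigned int order)
    {
            unsigned long x;

            for (x = 0; x != (1UL << order); x++) {
                    vm_page_t pgo = page + x;

                    if ((pgo->oflags & VPO_UNMANAGED) == 0) {
                            /* Managed page: drop the wiring only. */
                            vm_page_unwire(pgo, PQ_ACTIVE);
                    } else if (vm_page_unwire_noq(pgo)) {
                            /* Unmanaged, last wiring gone: free it. */
                            vm_page_free(pgo);
                    }
            }
    }
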
linux_release_pages():
   206  CTASSERT(offsetof(struct folio, page) == 0);
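
The compile-time assertion holds because the compat struct folio embeds its
struct page as the first member, so the two pointer types interconvert with
a plain cast. An illustrative layout, trimmed to the one field that matters
here; the helper name is ours:

    struct folio {
            struct page page;       /* must remain at offset 0 */
            /* ... further fields elided ... */
    };

    /* Safe only because of the CTASSERT above. */
    static inline struct page *
    folio_to_page_sketch(struct folio *f)
    {
            return (&f->page);
    }
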
229 ("%s: addr %p is not page aligned", __func__, (void *)addr)); in linux_free_kmem()
234 vm_page_t page; in linux_free_kmem() local
236 page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr)); in linux_free_kmem()
237 linux_free_pages(page, order); in linux_free_kmem()
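
On DMAP platforms the conversion above is pure address arithmetic: a
direct-map kernel VA translates to a physical address and then to its
vm_page with no table lookups. The same round trip as a standalone helper;
the helper name and the repeated alignment assertion are ours:

    static vm_page_t
    dmap_va_to_page(vm_offset_t addr)
    {
            KASSERT((addr & PAGE_MASK) == 0,
                ("%s: addr %p is not page aligned", __func__, (void *)addr));
            return (PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr)));
    }
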
linux_get_user_pages_internal():
   243      int write, struct page **pages)

__get_user_pages_fast():
   257      struct page **pages)
   284       * Explicitly dirty the physical page. Otherwise, the

get_user_pages_remote():
   301      struct page **pages, struct vm_area_struct **vmas)

lkpi_get_user_pages():
   312      unsigned int gup_flags, struct page **pages)
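
These are the get_user_pages() entry points LinuxKPI emulates. A
hypothetical driver-side use of the fast path, pinning one writable user
page; the set_page_dirty() call before release mirrors the truncated
"explicitly dirty the physical page" comment in __get_user_pages_fast():

    #include <linux/errno.h>
    #include <linux/mm.h>

    static int
    pin_one_user_page(unsigned long uaddr)
    {
            struct page *pg;
            int n;

            n = get_user_pages_fast(uaddr, 1, FOLL_WRITE, &pg);
            if (n != 1)
                    return (n < 0 ? n : -EFAULT);

            /* ... read or DMA through page_address(pg) ... */

            set_page_dirty(pg);
            put_page(pg);
            return (0);
    }
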
lkpi_vmf_insert_pfn_prot_locked():
   334      vm_page_t page;
   345      page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
   346      if (page == NULL) {
   347          page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
   348          if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
   352          if (page->object != NULL) {
   353              tmp_obj = page->object;
   354              vm_page_xunbusy(page);
   357              if (page->object == tmp_obj &&
   358                  vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
   359                  KASSERT(page->object == tmp_obj,
   360                      ("page has changed identity"));
   361                  KASSERT((page->oflags & VPO_UNMANAGED) == 0,
   362                      ("page does not belong to shmem"));
   363                  vm_pager_page_unswapped(page);
   364                  if (pmap_page_is_mapped(page)) {
   365                      vm_page_xunbusy(page);
   367                      printf("%s: page rename failed: page "
   372                  vm_page_remove(page);
   379          if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
   380              vm_page_xunbusy(page);
   383          vm_page_valid(page);
   385      pmap_page_set_memattr(page, pgprot2cachemode(prot));
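
This function is the backend reached by LinuxKPI's vmf_insert_pfn_prot()
when a DRM-style driver inserts a device page frame during a fault: grab the
page already at pindex, or busy the PFN's page, detach it from its previous
(shmem) object, and insert it into the faulting object. A hypothetical
fault handler showing that entry point; demo_base_pfn and the offset math
are assumptions, not taken from the source:

    #include <linux/mm.h>

    static unsigned long demo_base_pfn;     /* hypothetical device base PFN */

    static vm_fault_t
    demo_fault(struct vm_fault *vmf)
    {
            unsigned long pgoff;

            pgoff = (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
            return (vmf_insert_pfn_prot(vmf->vma, vmf->address,
                demo_base_pfn + pgoff, vmf->vma->vm_page_prot));
    }
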
page-fragment cache (file-scope comment):
   532   * We only support up to a single page as the fragment size, and we will
   533   * always return a full page. This may be wasteful on small objects,
   534   * but the only known consumer (mt76) asks for either a half page
   535   * or a full page. If this were to become a problem, we could implement
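
Per that comment, every fragment request is satisfied by a full page. A
hypothetical consumer call in the mt76 style; page_frag_alloc() and struct
page_frag_cache are the standard Linux API this cache implements, while the
wrapper itself is illustrative:

    #include <linux/gfp.h>

    static void *
    rx_buf_alloc(struct page_frag_cache *cache, size_t frag_sz)
    {
            /* frag_sz is at most PAGE_SIZE; a full page backs it anyway. */
            return (page_frag_alloc(cache, frag_sz, GFP_ATOMIC));
    }
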
linuxkpi_page_frag_free():
   564      vm_page_t page;
   566      page = virt_to_page(addr);
   567      linux_free_pages(page, 0);

linuxkpi__page_frag_cache_drain():
   571  linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
   574      linux_free_pages(page, 0);