Lines matching refs: pages
35 static inline void sanity_check_pinned_pages(struct page **pages, in sanity_check_pinned_pages() argument
53 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
54 struct page *page = *pages; in sanity_check_pinned_pages()
290 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
298 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
302 sanity_check_pinned_pages(pages, npages); in unpin_user_pages_dirty_lock()
304 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_dirty_lock()
375 static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages) in gup_fast_unpin_user_pages() argument
387 folio = gup_folio_next(pages, npages, i, &nr); in gup_fast_unpin_user_pages()
401 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
415 sanity_check_pinned_pages(pages, npages); in unpin_user_pages()
417 if (!pages[i]) { in unpin_user_pages()
421 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages()
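The hits above (apparently from mm/gup.c) are the release side of the pin API: unpin_user_pages(), unpin_user_pages_dirty_lock(), and the sanity_check_pinned_pages() debug helper they share. The following is a minimal, hypothetical sketch of how a caller might pair pin_user_pages_fast() (listed further down) with unpin_user_pages_dirty_lock(); the function name example_pin_and_dirty() and its error handling are illustrative assumptions, not taken from the source.

#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Illustrative only: pin a user buffer, let something write into it,
 * then drop the pins while marking the pages dirty.
 */
static int example_pin_and_dirty(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int pinned;

	pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* FOLL_WRITE: the pages will be written to while pinned. */
	pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0) {
		kvfree(pages);
		return pinned;
	}

	/* ... hand pages[0..pinned) to whatever writes into them ... */

	/* make_dirty == true: mark each page dirty before its pin is dropped. */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	kvfree(pages);
	return 0;
}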
490 struct page **pages) in record_subpages() argument
497 pages[nr] = nth_page(start_page, nr); in record_subpages()
1426 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1438 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1466 pages ? &page : NULL); in __get_user_pages()
1516 if (pages) { in __get_user_pages()
1529 if (pages) { in __get_user_pages()
1564 pages[i + j] = subpage; in __get_user_pages()
1720 struct page **pages, in __get_user_pages_locked() argument
1755 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1760 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1793 if (likely(pages)) in __get_user_pages_locked()
1794 pages += ret; in __get_user_pages_locked()
1825 pages, locked); in __get_user_pages_locked()
1841 if (likely(pages)) in __get_user_pages_locked()
1842 pages++; in __get_user_pages_locked()
2052 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
2092 if (pages) { in __get_user_pages_locked()
2093 pages[i] = virt_to_page((void *)start); in __get_user_pages_locked()
2094 if (pages[i]) in __get_user_pages_locked()
2095 get_page(pages[i]); in __get_user_pages_locked()
2295 struct page **pages; member
2307 return page_folio(pofs->pages[i]); in pofs_get_folio()
2320 unpin_user_pages(pofs->pages, pofs->nr_entries); in pofs_unpin()
2493 struct page **pages) in check_and_migrate_movable_pages() argument
2496 .pages = pages, in check_and_migrate_movable_pages()
2505 struct page **pages) in check_and_migrate_movable_pages() argument
2524 struct page **pages, in __gup_longterm_locked() argument
2532 return __get_user_pages_locked(mm, start, nr_pages, pages, in __gup_longterm_locked()
2538 pages, locked, in __gup_longterm_locked()
2546 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); in __gup_longterm_locked()
2556 static bool is_valid_gup_args(struct page **pages, int *locked, in is_valid_gup_args() argument
2590 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) in is_valid_gup_args()
2661 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2666 if (!is_valid_gup_args(pages, locked, &gup_flags, in get_user_pages_remote()
2670 return __get_user_pages_locked(mm, start, nr_pages, pages, in get_user_pages_remote()
2679 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2701 unsigned int gup_flags, struct page **pages) in get_user_pages() argument
2705 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) in get_user_pages()
2708 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages()
2729 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2733 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_unlocked()
2737 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages_unlocked()
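The three exported FOLL_GET entry points above differ mainly in whose mm they walk and who manages the mmap lock: get_user_pages_remote() works on a caller-supplied mm with the lock already held, get_user_pages() on current->mm, and get_user_pages_unlocked() takes the lock itself. A hypothetical sketch using get_user_pages_remote() against a foreign address space follows; example_grab_remote_page(), the get_task_mm() assumption in the comment, and the error handling are illustrative, not from the source.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * Illustrative only: take a FOLL_GET reference on one page of another
 * task's address space, ptrace-style. @mm is assumed to come from
 * get_task_mm() and to still be live. Release the result with put_page().
 */
static struct page *example_grab_remote_page(struct mm_struct *mm,
					     unsigned long uaddr)
{
	struct page *page;
	int locked = 1;
	long ret;

	if (mmap_read_lock_killable(mm))
		return ERR_PTR(-EINTR);

	ret = get_user_pages_remote(mm, uaddr, 1, 0, &page, &locked);

	/* If GUP had to drop the mmap lock, it cleared 'locked' and left it dropped. */
	if (locked)
		mmap_read_unlock(mm);

	return ret == 1 ? page : ERR_PTR(ret < 0 ? ret : -EFAULT);
}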
2866 unsigned int flags, struct page **pages) in gup_fast_undo_dev_pagemap() argument
2869 struct folio *folio = page_folio(pages[--(*nr)]); in gup_fast_undo_dev_pagemap()
2897 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pte_range() argument
2931 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_pte_range()
2974 pages[*nr] = page; in gup_fast_pte_range()
2998 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pte_range() argument
3007 unsigned long end, unsigned int flags, struct page **pages, int *nr) in gup_fast_devmap_leaf() argument
3018 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3023 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3029 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3033 pages[*nr] = page; in gup_fast_devmap_leaf()
3043 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pmd_leaf() argument
3050 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) in gup_fast_devmap_pmd_leaf()
3054 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_pmd_leaf()
3061 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pud_leaf() argument
3068 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) in gup_fast_devmap_pud_leaf()
3072 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_pud_leaf()
3079 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pmd_leaf() argument
3087 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pud_leaf() argument
3096 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pmd_leaf() argument
3113 pages, nr); in gup_fast_pmd_leaf()
3117 refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); in gup_fast_pmd_leaf()
3143 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pud_leaf() argument
3160 pages, nr); in gup_fast_pud_leaf()
3164 refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); in gup_fast_pud_leaf()
3191 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pgd_leaf() argument
3204 refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr); in gup_fast_pgd_leaf()
3231 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pmd_range() argument
3251 pages, nr)) in gup_fast_pmd_range()
3255 pages, nr)) in gup_fast_pmd_range()
3263 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pud_range() argument
3278 pages, nr)) in gup_fast_pud_range()
3281 pages, nr)) in gup_fast_pud_range()
3289 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_p4d_range() argument
3304 pages, nr)) in gup_fast_p4d_range()
3312 unsigned int flags, struct page **pages, int *nr) in gup_fast_pgd_range() argument
3326 pages, nr)) in gup_fast_pgd_range()
3329 pages, nr)) in gup_fast_pgd_range()
3335 unsigned int flags, struct page **pages, int *nr) in gup_fast_pgd_range() argument
3352 unsigned int gup_flags, struct page **pages) in gup_fast() argument
3380 gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned); in gup_fast()
3389 gup_fast_unpin_user_pages(pages, nr_pinned); in gup_fast()
3392 sanity_check_pinned_pages(pages, nr_pinned); in gup_fast()
3399 unsigned int gup_flags, struct page **pages) in gup_fast_fallback() argument
3427 nr_pinned = gup_fast(start, end, gup_flags, pages); in gup_fast_fallback()
3433 pages += nr_pinned; in gup_fast_fallback()
3435 pages, &locked, in gup_fast_fallback()
3468 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
3477 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_fast_only()
3481 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in get_user_pages_fast_only()
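get_user_pages_fast_only() above is the lockless-only variant: it never takes the mmap lock and never sleeps, so it suits an opportunistic fast path from contexts that must not block, with the caller falling back to a sleeping GUP variant when it returns fewer pages than asked for. A hypothetical sketch (example_try_fast_grab() is not from the source):

#include <linux/mm.h>

/*
 * Illustrative only: try to grab one page without sleeping; on failure
 * the caller is expected to retry via a slow path that may sleep.
 */
static struct page *example_try_fast_grab(unsigned long uaddr, bool write)
{
	struct page *page;

	if (get_user_pages_fast_only(uaddr, 1, write ? FOLL_WRITE : 0, &page) != 1)
		return NULL;

	/* Reference was taken with FOLL_GET; release it with put_page(). */
	return page;
}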
3502 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
3510 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) in get_user_pages_fast()
3512 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
3536 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
3538 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages_fast()
3540 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
3569 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
3574 if (!is_valid_gup_args(pages, locked, &gup_flags, in pin_user_pages_remote()
3577 return __gup_longterm_locked(mm, start, nr_pages, pages, in pin_user_pages_remote()
3602 unsigned int gup_flags, struct page **pages) in pin_user_pages() argument
3606 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages()
3609 pages, &locked, gup_flags); in pin_user_pages()
3622 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
3626 if (!is_valid_gup_args(pages, NULL, &gup_flags, in pin_user_pages_unlocked()
3630 return __gup_longterm_locked(current->mm, start, nr_pages, pages, in pin_user_pages_unlocked()
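The FOLL_PIN entry points above all route through __gup_longterm_locked(), and with FOLL_LONGTERM they trigger the movable-page migration handled by check_and_migrate_movable_pages() listed earlier. A hypothetical sketch of a long-term pin via pin_user_pages_unlocked(), roughly as an RDMA-style registration might do it; example_longterm_pin() is illustrative, not from the source.

#include <linux/mm.h>

/*
 * Illustrative only: long-term pin of a user buffer. The caller supplies a
 * pages array of at least @nr_pages entries and later drops the pins with
 * unpin_user_pages(pages, pinned).
 */
static long example_longterm_pin(unsigned long uaddr, unsigned long nr_pages,
				 struct page **pages)
{
	long pinned;

	pinned = pin_user_pages_unlocked(uaddr, nr_pages, pages,
					 FOLL_WRITE | FOLL_LONGTERM);
	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	/* ... register pages[0..pinned) with the device ... */
	return pinned;
}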