/linux/drivers/iommu/iommufd/

pages.c
    164  static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)  in iopt_pages_add_npinned() argument
    168  rc = check_add_overflow(pages->npinned, npages, &pages->npinned);  in iopt_pages_add_npinned()
    170  WARN_ON(rc || pages->npinned > pages->npages);  in iopt_pages_add_npinned()
    173  static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)  in iopt_pages_sub_npinned() argument
    177  rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);  in iopt_pages_sub_npinned()
    179  WARN_ON(rc || pages->npinned > pages->npages);  in iopt_pages_sub_npinned()
    182  static void iopt_pages_err_unpin(struct iopt_pages *pages,  in iopt_pages_err_unpin() argument
    190  iopt_pages_sub_npinned(pages, npages);  in iopt_pages_err_unpin()
    250  static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages,  in iopt_pages_find_domain_area() argument
    255  node = interval_tree_iter_first(&pages->domains_itree, index, index);  in iopt_pages_find_domain_area()
    [all …]
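
The pages.c hits above show iommufd's pinned-page accounting idiom: every
update of pages->npinned goes through check_add_overflow() or
check_sub_overflow(), so a wrap or an over-pin trips WARN_ON() instead of
silently corrupting the counter. A minimal userspace sketch of the same
idiom, using the GCC/Clang builtins that back the kernel's helpers; the
struct and function names here are invented for illustration:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct pin_counter {
            size_t npinned;  /* currently pinned pages */
            size_t npages;   /* total pages backing the object */
    };

    static void pin_add(struct pin_counter *c, size_t n)
    {
            size_t sum;
            int rc = __builtin_add_overflow(c->npinned, n, &sum);

            /* mirrors WARN_ON(rc || pages->npinned > pages->npages) */
            assert(!rc && sum <= c->npages);
            c->npinned = sum;
    }

    static void pin_sub(struct pin_counter *c, size_t n)
    {
            size_t diff;
            int rc = __builtin_sub_overflow(c->npinned, n, &diff);

            assert(!rc && diff <= c->npages);
            c->npinned = diff;
    }

    int main(void)
    {
            struct pin_counter c = { .npinned = 0, .npages = 8 };

            pin_add(&c, 5);
            pin_sub(&c, 5);
            printf("pinned %zu of %zu\n", c.npinned, c.npages);
            return 0;
    }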

io_pagetable.c
    24  struct iopt_pages *pages;  member
    43  if (!iter->area->pages) {  in iopt_area_contig_init()
    66  !iter->area->pages) {  in iopt_area_contig_next()
    199  struct iopt_pages *pages, unsigned long iova,  in iopt_insert_area() argument
    205  if ((iommu_prot & IOMMU_WRITE) && !pages->writable)  in iopt_insert_area()
    221  if (WARN_ON(area->pages_node.last >= pages->npages))  in iopt_insert_area()
    271  switch (elm->pages->type) {  in iopt_alloc_area_pages()
    273  start = elm->start_byte + (uintptr_t)elm->pages->uptr;  in iopt_alloc_area_pages()
    276  start = elm->start_byte + elm->pages->start;  in iopt_alloc_area_pages()
    299  rc = iopt_insert_area(iopt, elm->area, elm->pages, iova,  in iopt_alloc_area_pages()
    [all …]

ioas.c
    442  static bool need_charge_update(struct iopt_pages *pages)  in need_charge_update() argument
    444  switch (pages->account_mode) {  in need_charge_update()
    448  return pages->source_mm != current->mm;  in need_charge_update()
    454  return (pages->source_user != current_user()) ||  in need_charge_update()
    455  (pages->source_mm != current->mm);  in need_charge_update()
    495  static void change_mm(struct iopt_pages *pages)  in change_mm() argument
    497  struct task_struct *old_task = pages->source_task;  in change_mm()
    498  struct user_struct *old_user = pages->source_user;  in change_mm()
    499  struct mm_struct *old_mm = pages->source_mm;  in change_mm()
    501  pages->source_mm = current->mm;  in change_mm()
    [all …]

/linux/net/ceph/

pagevec.c
    13  void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)  in ceph_put_page_vector() argument
    19  set_page_dirty_lock(pages[i]);  in ceph_put_page_vector()
    20  put_page(pages[i]);  in ceph_put_page_vector()
    22  kvfree(pages);  in ceph_put_page_vector()
    26  void ceph_release_page_vector(struct page **pages, int num_pages)  in ceph_release_page_vector() argument
    31  __free_pages(pages[i], 0);  in ceph_release_page_vector()
    32  kfree(pages);  in ceph_release_page_vector()
    41  struct page **pages;  in ceph_alloc_page_vector() local
    44  pages = kmalloc_array(num_pages, sizeof(*pages), flags);  in ceph_alloc_page_vector()
    45  if (!pages)  in ceph_alloc_page_vector()
    [all …]
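
ceph_alloc_page_vector() and its release helpers above show the standard
vector shape: an allocation loop paired with an unwind path that frees
everything already allocated when page i fails. A userspace sketch of that
shape, with malloc() standing in for alloc_page(); all names are invented:

    #include <stdlib.h>

    #define PAGE_SIZE 4096

    /* Allocate num_pages page-sized buffers, unwinding on failure. */
    static void **page_vector_alloc(int num_pages)
    {
            void **pages = calloc(num_pages, sizeof(*pages));

            if (!pages)
                    return NULL;
            for (int i = 0; i < num_pages; i++) {
                    pages[i] = malloc(PAGE_SIZE);
                    if (!pages[i]) {
                            while (--i >= 0)   /* unwind partial allocation */
                                    free(pages[i]);
                            free(pages);
                            return NULL;
                    }
            }
            return pages;
    }

    static void page_vector_release(void **pages, int num_pages)
    {
            for (int i = 0; i < num_pages; i++)
                    free(pages[i]);
            free(pages);
    }

    int main(void)
    {
            void **v = page_vector_alloc(4);

            if (v)
                    page_vector_release(v, 4);
            return 0;
    }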

/linux/io_uring/

memmap.c
    17  static void *io_mem_alloc_compound(struct page **pages, int nr_pages,  in io_mem_alloc_compound() argument
    34  pages[i] = page + i;  in io_mem_alloc_compound()
    39  static void *io_mem_alloc_single(struct page **pages, int nr_pages, size_t size,  in io_mem_alloc_single() argument
    46  pages[i] = alloc_page(gfp);  in io_mem_alloc_single()
    47  if (!pages[i])  in io_mem_alloc_single()
    51  ret = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);  in io_mem_alloc_single()
    56  put_page(pages[i]);  in io_mem_alloc_single()
    64  struct page **pages;  in io_pages_map() local
    69  pages = kvmalloc_array(nr_pages, sizeof(struct page *), gfp);  in io_pages_map()
    70  if (!pages)  in io_pages_map()
    [all …]
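
io_mem_alloc_single() allocates pages one by one and relies on vmap() to
stitch them into a single contiguous kernel address range, unwinding with
put_page() when vmap() fails. Userspace has no vmap(), but the effect can
be approximated with a memfd: map each page of the file individually (the
scattered view), then map the whole file once more to get the contiguous
view. A Linux-only sketch (memfd_create() needs glibc 2.27+); everything
here is local to the example:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long psz = sysconf(_SC_PAGESIZE);
            int nr_pages = 4;
            int fd = memfd_create("vmap-demo", 0);
            char *pages[4];

            if (fd < 0 || ftruncate(fd, nr_pages * psz) < 0)
                    return 1;

            /* per-page mappings, playing the role of the pages[] array */
            for (int i = 0; i < nr_pages; i++) {
                    pages[i] = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                                    MAP_SHARED, fd, (off_t)i * psz);
                    if (pages[i] == MAP_FAILED)
                            return 1;
            }

            /* one contiguous view of the same pages, like vmap() */
            char *vaddr = mmap(NULL, nr_pages * psz, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, 0);
            if (vaddr == MAP_FAILED)
                    return 1;

            strcpy(pages[2], "hello");
            printf("%s\n", vaddr + 2 * psz);   /* prints "hello" */
            close(fd);
            return 0;
    }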

/linux/mm/

percpu-vm.c
    23  * pcpu_get_pages - get temp pages array
    30  * Pointer to temp pages array on success.
    34  static struct page **pages;  in pcpu_get_pages() local
    35  size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);  in pcpu_get_pages()
    39  if (!pages)  in pcpu_get_pages()
    40  pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);  in pcpu_get_pages()
    41  return pages;  in pcpu_get_pages()
    45  * pcpu_free_pages - free pages which were allocated for @chunk
    46  * @chunk: chunk pages were allocated for
    47  * @pages
    55  pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)  pcpu_free_pages() argument
    83  pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp)  pcpu_alloc_pages() argument
    154  pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)  pcpu_unmap_pages() argument
    193  __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages)  __pcpu_map_pages() argument
    215  pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)  pcpu_map_pages() argument
    279  struct page **pages;  pcpu_populate_chunk() local
    315  struct page **pages;  pcpu_depopulate_chunk() local
    [all …]
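
The kernel-doc hits for pcpu_get_pages() describe a deliberate pattern: one
lazily allocated temp array is shared by every populate/depopulate cycle
instead of being allocated per call (the kernel serializes callers with
pcpu_alloc_mutex). A sketch of the idiom, assuming a single-threaded
caller; the constants are stand-ins for pcpu_nr_units and pcpu_unit_pages:

    #include <stdlib.h>

    #define NR_UNITS   16
    #define UNIT_PAGES 32

    /* Allocated on first use, reused on every later call, never freed.
     * NULL means the one-time allocation failed. */
    static void **get_scratch_pages(void)
    {
            static void **pages;

            if (!pages)
                    pages = calloc(NR_UNITS * UNIT_PAGES, sizeof(*pages));
            return pages;
    }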

gup_test.c
    10  static void put_back_pages(unsigned int cmd, struct page **pages,  in put_back_pages() argument
    19  put_page(pages[i]);  in put_back_pages()
    25  unpin_user_pages(pages, nr_pages);  in put_back_pages()
    29  unpin_user_pages(pages, nr_pages);  in put_back_pages()
    32  put_page(pages[i]);  in put_back_pages()
    39  static void verify_dma_pinned(unsigned int cmd, struct page **pages,  in verify_dma_pinned() argument
    50  folio = page_folio(pages[i]);  in verify_dma_pinned()
    69  static void dump_pages_test(struct gup_test *gup, struct page **pages,  in dump_pages_test() argument
    94  dump_page(pages[index_to_dump],  in dump_pages_test()
    106  struct page **pages;  in __gup_test_ioctl() local
    [all …]

gup.c
    35  static inline void sanity_check_pinned_pages(struct page **pages,  in sanity_check_pinned_pages() argument
    53  for (; npages; npages--, pages++) {  in sanity_check_pinned_pages()
    54  struct page *page = *pages;  in sanity_check_pinned_pages()
    290  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,  in unpin_user_pages_dirty_lock() argument
    298  unpin_user_pages(pages, npages);  in unpin_user_pages_dirty_lock()
    302  sanity_check_pinned_pages(pages, npages);  in unpin_user_pages_dirty_lock()
    304  folio = gup_folio_next(pages, npages, i, &nr);  in unpin_user_pages_dirty_lock()
    375  static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages)  in gup_fast_unpin_user_pages() argument
    387  folio = gup_folio_next(pages, npages, i, &nr);  in gup_fast_unpin_user_pages()
    401  void unpin_user_pages(struct page **pages, unsigned long npages)  in unpin_user_pages() argument
    [all …]

mincore.c
    187  static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)  in do_mincore() argument
    196  end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));  in do_mincore()
    198  unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);  in do_mincore() local
    199  memset(vec, 1, pages);  in do_mincore()
    200  return pages;  in do_mincore()
    236  unsigned long pages;  in SYSCALL_DEFINE3() local
    250  pages = len >> PAGE_SHIFT;  in SYSCALL_DEFINE3()
    251  pages += (offset_in_page(len)) != 0;  in SYSCALL_DEFINE3()
    253  if (!access_ok(vec, pages))  in SYSCALL_DEFINE3()
    261  while (pages) {  in SYSCALL_DEFINE3()
    [all …]
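
The SYSCALL_DEFINE3() hits show how mincore() turns a byte length into a
page count: shift the length down by PAGE_SHIFT, then add one more page if
a sub-page tail remains. A worked example; PAGE_SHIFT = 12 (4K pages) is an
assumption of this sketch, the kernel uses the architecture's value:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define offset_in_page(x) ((unsigned long)(x) & (PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long len = 10000;   /* 2 full pages + 1808 bytes */

            /* the mincore() style: shift, then add 1 for the tail */
            unsigned long pages = len >> PAGE_SHIFT;
            pages += offset_in_page(len) != 0;

            /* equivalent to DIV_ROUND_UP(len, PAGE_SIZE) */
            unsigned long pages2 = (len + PAGE_SIZE - 1) / PAGE_SIZE;

            printf("%lu %lu\n", pages, pages2);   /* prints "3 3" */
            return 0;
    }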

/linux/drivers/gpu/drm/i915/gem/selftests/

huge_gem_object.c
    12  struct sg_table *pages)  in huge_free_pages() argument
    18  for_each_sgt_page(page, sgt_iter, pages) {  in huge_free_pages()
    24  sg_free_table(pages);  in huge_free_pages()
    25  kfree(pages);  in huge_free_pages()
    34  struct sg_table *pages;  in huge_get_pages() local
    41  pages = kmalloc(sizeof(*pages), GFP);  in huge_get_pages()
    42  if (!pages)  in huge_get_pages()
    45  if (sg_alloc_table(pages, npages, GFP)) {  in huge_get_pages()
    46  kfree(pages);  in huge_get_pages()
    50  sg = pages->sgl;  in huge_get_pages()
    [all …]

/linux/drivers/xen/

xlate_mmu.c
    48  static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,  in xen_for_each_gfn() argument
    57  page = pages[i / XEN_PFN_PER_PAGE];  in xen_for_each_gfn()
    71  struct page **pages;  member
    99  struct page *page = info->pages[info->index++];  in remap_pte_fn()
    148  struct page **pages)  in xen_xlate_remap_gfn_array() argument
    163  data.pages = pages;  in xen_xlate_remap_gfn_array()
    184  int nr, struct page **pages)  in xen_xlate_unmap_gfn_range() argument
    186  xen_for_each_gfn(pages, nr, unmap_gfn, NULL);  in xen_xlate_unmap_gfn_range()
    217  struct page **pages;  in xen_xlate_map_ballooned_pages() local
    226  pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);  in xen_xlate_map_ballooned_pages()
    [all …]

/linux/drivers/media/pci/intel/ipu6/

ipu6-dma.c
    23  struct page **pages;  member
    62  struct page **pages;  in __alloc_buffer() local
    65  pages = kvzalloc(array_size, GFP_KERNEL);  in __alloc_buffer()
    66  if (!pages)  in __alloc_buffer()
    74  pages[i] = alloc_pages(gfp, order);  in __alloc_buffer()
    75  while (!pages[i] && order)  in __alloc_buffer()
    76  pages[i] = alloc_pages(gfp, --order);  in __alloc_buffer()
    77  if (!pages[i])  in __alloc_buffer()
    81  split_page(pages[i], order);  in __alloc_buffer()
    84  pages[i + j] = pages[i] + j;  in __alloc_buffer()
    [all …]
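
__alloc_buffer() tries a high-order (large, physically contiguous)
allocation and retries with --order until something fits, after which
split_page() turns the chunk into independently managed single pages. The
retry half translates directly to userspace; aligned_alloc() stands in for
alloc_pages() and the function name is invented:

    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    /* Try 2^*order pages; halve the request until an allocation
     * succeeds or order reaches 0. *order reports what we got. */
    static void *alloc_order_fallback(unsigned int *order)
    {
            void *p;

            while (!(p = aligned_alloc(PAGE_SIZE, PAGE_SIZE << *order)) &&
                   *order)
                    (*order)--;
            return p;   /* may still be NULL if even order 0 failed */
    }

    int main(void)
    {
            unsigned int order = 4;   /* ask for 16 pages... */
            void *chunk = alloc_order_fallback(&order);

            /* ...but accept whatever 2^order pages we actually got */
            free(chunk);
            return 0;
    }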

/linux/kernel/dma/

remap.c
    17  return area->pages;  in dma_common_find_pages()
    24  void *dma_common_pages_remap(struct page **pages, size_t size,  in dma_common_pages_remap() argument
    29  vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,  in dma_common_pages_remap()
    32  find_vm_area(vaddr)->pages = pages;  in dma_common_pages_remap()
    44  struct page **pages;  in dma_common_contiguous_remap() local
    48  pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);  in dma_common_contiguous_remap()
    49  if (!pages)  in dma_common_contiguous_remap()
    52  pages[i] = nth_page(page, i);  in dma_common_contiguous_remap()
    53  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  in dma_common_contiguous_remap()
    54  kvfree(pages);  in dma_common_contiguous_remap()
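
dma_common_contiguous_remap() is the inverse of the io_uring snippet
earlier: here the memory is already physically contiguous, and the pages[]
array (pages[i] = nth_page(page, i)) exists only long enough for vmap() to
consume it, after which it is freed. A sketch of building that per-page
view over one contiguous block; names are illustrative and 4K pages are
assumed:

    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    /* Return an array whose i-th entry points at page i of one
     * contiguous block, the shape vmap()-style consumers expect. */
    static void **pages_of(void *contig, size_t size, size_t *count)
    {
            *count = (size + PAGE_SIZE - 1) / PAGE_SIZE;
            void **pages = malloc(*count * sizeof(*pages));

            if (!pages)
                    return NULL;
            for (size_t i = 0; i < *count; i++)
                    pages[i] = (char *)contig + i * PAGE_SIZE;
            return pages;
    }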

/linux/arch/s390/hypfs/

hypfs_diag.c
    56  void *diag204_get_buffer(enum diag204_format fmt, int *pages)  in diag204_get_buffer() argument
    59  *pages = diag204_buf_pages;  in diag204_get_buffer()
    63  *pages = 1;  in diag204_get_buffer()
    65  *pages = diag204((unsigned long)DIAG204_SUBC_RSI |  in diag204_get_buffer()
    67  if (*pages <= 0)  in diag204_get_buffer()
    70  diag204_buf = __vmalloc_node(array_size(*pages, PAGE_SIZE),  in diag204_get_buffer()
    75  diag204_buf_pages = *pages;  in diag204_get_buffer()
    96  int pages, rc;  in diag204_probe() local
    98  buf = diag204_get_buffer(DIAG204_INFO_EXT, &pages);  in diag204_probe()
    101  (unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {  in diag204_probe()
    [all …]

/linux/drivers/staging/media/ipu3/

ipu3-dmamap.c
    20  static void imgu_dmamap_free_buffer(struct page **pages,  in imgu_dmamap_free_buffer() argument
    26  __free_page(pages[count]);  in imgu_dmamap_free_buffer()
    27  kvfree(pages);  in imgu_dmamap_free_buffer()
    36  struct page **pages;  in imgu_dmamap_alloc_buffer() local
    42  pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);  in imgu_dmamap_alloc_buffer()
    44  if (!pages)  in imgu_dmamap_alloc_buffer()
    72  imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);  in imgu_dmamap_alloc_buffer()
    77  pages[i++] = page++;  in imgu_dmamap_alloc_buffer()
    80  return pages;  in imgu_dmamap_alloc_buffer()
    100  struct page **pages;  in imgu_dmamap_alloc() local
    [all …]

/linux/drivers/media/common/videobuf2/

frame_vector.c
    82  struct page **pages;  in put_vaddr_frames() local
    86  pages = frame_vector_pages(vec);  in put_vaddr_frames()
    92  if (WARN_ON(IS_ERR(pages)))  in put_vaddr_frames()
    95  unpin_user_pages(pages, vec->nr_frames);  in put_vaddr_frames()
    114  struct page **pages;  in frame_vector_to_pages() local
    122  pages = (struct page **)nums;  in frame_vector_to_pages()
    124  pages[i] = pfn_to_page(nums[i]);  in frame_vector_to_pages()
    140  struct page **pages;  in frame_vector_to_pfns() local
    144  pages = (struct page **)(vec->ptrs);  in frame_vector_to_pfns()
    145  nums = (unsigned long *)pages;  in frame_vector_to_pfns()
    [all …]

/linux/drivers/tee/

tee_shm.c
    18  static void shm_put_kernel_pages(struct page **pages, size_t page_count)  in shm_put_kernel_pages() argument
    23  put_page(pages[n]);  in shm_put_kernel_pages()
    26  static void shm_get_kernel_pages(struct page **pages, size_t page_count)  in shm_get_kernel_pages() argument
    31  get_page(pages[n]);  in shm_get_kernel_pages()
    36  if (shm->pages) {  in release_registered_pages()
    38  unpin_user_pages(shm->pages, shm->num_pages);  in release_registered_pages()
    40  shm_put_kernel_pages(shm->pages, shm->num_pages);  in release_registered_pages()
    42  kfree(shm->pages);  in release_registered_pages()
    209  struct page **pages,  in tee_dyn_shm_alloc_helper() argument
    214  struct page **pages;  in tee_dyn_shm_alloc_helper() local
    [all …]

/linux/net/rds/

info.c
    65  struct page **pages;  member
    122  iter->addr = kmap_atomic(*iter->pages);  in rds_info_copy()
    127  "bytes %lu\n", *iter->pages, iter->addr,  in rds_info_copy()
    140  iter->pages++;  in rds_info_copy()
    166  struct page **pages = NULL;  in rds_info_getsockopt() local
    190  pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);  in rds_info_getsockopt()
    191  if (!pages) {  in rds_info_getsockopt()
    195  ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);  in rds_info_getsockopt()
    214  iter.pages = pages;  in rds_info_getsockopt()
    237  if (pages)  in rds_info_getsockopt()
    [all …]

/linux/include/linux/

balloon_compaction.h
    57  struct list_head pages; /* Pages enqueued & handled to Host */  member
    67  struct list_head *pages);
    69  struct list_head *pages, size_t n_req_pages);
    75  INIT_LIST_HEAD(&balloon->pages);  in balloon_devinfo_init()
    97  list_add(&page->lru, &balloon->pages);  in balloon_page_insert()
    141  list_add(&page->lru, &balloon->pages);  in balloon_page_insert()
    164  static inline void balloon_page_push(struct list_head *pages, struct page *page)  in balloon_page_push() argument
    166  list_add(&page->lru, pages);  in balloon_page_push()
    176  static inline struct page *balloon_page_pop(struct list_head *pages)  in balloon_page_pop() argument
    178  struct page *page = list_first_entry_or_null(pages, struct page, lru);  in balloon_page_pop()
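
balloon_page_push() and balloon_page_pop() are thin wrappers over the
kernel's intrusive list: the link (->lru) lives inside struct page itself,
so enqueueing a page never allocates. A self-contained sketch of the same
idea, with a minimal list_head and container_of re-implemented here for
illustration:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_INIT(name) { &(name), &(name) }
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_add(struct list_head *new, struct list_head *head)
    {
            new->next = head->next;
            new->prev = head;
            head->next->prev = new;
            head->next = new;
    }

    static void list_del(struct list_head *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
    }

    struct fake_page {
            int pfn;
            struct list_head lru;   /* the embedded link */
    };

    /* balloon_page_pop() shape: first entry, or NULL if empty */
    static struct fake_page *page_pop(struct list_head *pages)
    {
            struct fake_page *p;

            if (pages->next == pages)
                    return NULL;
            p = container_of(pages->next, struct fake_page, lru);
            list_del(&p->lru);
            return p;
    }

    int main(void)
    {
            struct list_head pages = LIST_INIT(pages);
            struct fake_page a = { .pfn = 1 }, b = { .pfn = 2 };

            list_add(&a.lru, &pages);   /* balloon_page_push() shape */
            list_add(&b.lru, &pages);
            printf("%d\n", page_pop(&pages)->pfn);   /* 2: LIFO order */
            return 0;
    }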

/linux/Documentation/arch/powerpc/

vmemmap_dedup.rst
    14  With 2M PMD level mapping, we require 32 struct pages and a single 64K vmemmap
    15  page can contain 1024 struct pages (64K/sizeof(struct page)). Hence there is no
    18  With 1G PUD level mapping, we require 16384 struct pages and a single 64K
    19  vmemmap page can contain 1024 struct pages (64K/sizeof(struct page)). Hence we
    20  require 16 64K pages in vmemmap to map the struct page for 1G PUD level mapping.
    46  With 4K page size, 2M PMD level mapping requires 512 struct pages and a single
    47  4K vmemmap page contains 64 struct pages (4K/sizeof(struct page)). Hence we
    48  require 8 4K pages in vmemmap to map the struct page for 2M pmd level mapping.
    74  With 1G PUD level mapping, we require 262144 struct pages and a single 4K
    75  vmemmap page can contain 64 struct pages (4K/sizeof(struct page)). Hence we
    [all …]
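
Every vmemmap_dedup.rst hit above applies one formula: struct pages needed
= mapping size / base page size, and struct pages per vmemmap page = base
page size / sizeof(struct page). The excerpt's numbers (64K/64 = 1024,
4K/64 = 64) imply sizeof(struct page) == 64, which this sketch assumes
rather than the document stating outright:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long struct_page = 64;   /* assumed size */
            const unsigned long K = 1024, M = 1024 * K, G = 1024 * M;
            const struct {
                    const char *name;
                    unsigned long base, mapping;
            } c[] = {
                    { "64K base, 2M PMD", 64 * K, 2 * M },
                    { "64K base, 1G PUD", 64 * K, 1 * G },
                    { "4K base,  2M PMD",  4 * K, 2 * M },
                    { "4K base,  1G PUD",  4 * K, 1 * G },
            };

            for (int i = 0; i < 4; i++) {
                    unsigned long structs = c[i].mapping / c[i].base;
                    unsigned long per_page = c[i].base / struct_page;

                    /* DIV_ROUND_UP(structs, per_page) vmemmap pages */
                    printf("%s: %lu struct pages -> %lu vmemmap page(s)\n",
                           c[i].name, structs,
                           (structs + per_page - 1) / per_page);
            }
            return 0;
    }

Running it reproduces the excerpt's 16 and 8; the same formula gives 4096
for the 1G PUD / 4K base case, whose sentence is truncated in the excerpt.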

/linux/drivers/gpu/drm/i915/gem/

i915_gem_phys.c
    99  struct sg_table *pages)  in i915_gem_object_put_pages_phys() argument
    101  dma_addr_t dma = sg_dma_address(pages->sgl);  in i915_gem_object_put_pages_phys()
    102  void *vaddr = sg_page(pages->sgl);  in i915_gem_object_put_pages_phys()
    104  __i915_gem_object_release_shmem(obj, pages, false);  in i915_gem_object_put_pages_phys()
    131  sg_free_table(pages);  in i915_gem_object_put_pages_phys()
    132  kfree(pages);  in i915_gem_object_put_pages_phys()
    142  void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;  in i915_gem_object_pwrite_phys()
    173  void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;  in i915_gem_object_pread_phys()
    192  struct sg_table *pages;  in i915_gem_object_shmem_to_phys() local
    195  pages = __i915_gem_object_unset_pages(obj);  in i915_gem_object_shmem_to_phys()
    [all …]

/linux/arch/m68k/mm/

sun3kmap.c
    47  unsigned long type, int pages)  in do_pmeg_mapin() argument
    53  while(pages) {  in do_pmeg_mapin()
    57  pages--;  in do_pmeg_mapin()
    66  int pages;  in sun3_ioremap() local
    85  pages = size / PAGE_SIZE;  in sun3_ioremap()
    89  while(pages) {  in sun3_ioremap()
    93  if(seg_pages > pages)  in sun3_ioremap()
    94  seg_pages = pages;  in sun3_ioremap()
    98  pages -= seg_pages;  in sun3_ioremap()

/linux/drivers/net/ethernet/amd/xgbe/

xgbe-desc.c
    141  if (ring->rx_hdr_pa.pages) {  in xgbe_free_ring()
    144  put_page(ring->rx_hdr_pa.pages);  in xgbe_free_ring()
    146  ring->rx_hdr_pa.pages = NULL;  in xgbe_free_ring()
    152  if (ring->rx_buf_pa.pages) {  in xgbe_free_ring()
    155  put_page(ring->rx_buf_pa.pages);  in xgbe_free_ring()
    157  ring->rx_buf_pa.pages = NULL;  in xgbe_free_ring()
    289  struct page *pages = NULL;  in xgbe_alloc_pages() local
    300  pages = alloc_pages_node(node, gfp, order);  in xgbe_alloc_pages()
    301  if (pages)  in xgbe_alloc_pages()
    308  if (!pages && (node != NUMA_NO_NODE)) {  in xgbe_alloc_pages()
    [all …]

/linux/fs/squashfs/

page_actor.c
    32  if (actor->next_page == actor->pages)  in cache_next_page()
    44  int pages, int length)  in squashfs_page_actor_init() argument
    51  actor->length = length ? : pages * PAGE_SIZE;  in squashfs_page_actor_init()
    53  actor->pages = pages;  in squashfs_page_actor_init()
    75  if ((actor->next_page == actor->pages) ||  in handle_next_page()
    111  struct page **page, int pages, int length, loff_t start_index)  in squashfs_page_actor_init_special() argument
    128  actor->length = length ? : pages * PAGE_SIZE;  in squashfs_page_actor_init_special()
    130  actor->pages = pages;  in squashfs_page_actor_init_special()

/linux/fs/erofs/

zutil.c
    12  struct page **pages;  member
    87  tmp_pages[j] = gbuf->pages[j];  in z_erofs_gbuf_growsize()
    101  kfree(gbuf->pages);  in z_erofs_gbuf_growsize()
    102  gbuf->pages = tmp_pages;  in z_erofs_gbuf_growsize()
    115  tmp_pages[j] != gbuf->pages[j]))  in z_erofs_gbuf_growsize()
    141  z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,  in z_erofs_gbuf_init()
    142  sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);  in z_erofs_gbuf_init()
    143  if (!z_erofs_rsvbuf->pages) {  in z_erofs_gbuf_init()
    165  if (!gbuf->pages)  in z_erofs_gbuf_exit()
    169  if (gbuf->pages[j])  in z_erofs_gbuf_exit()
    [all …]