| /linux/tools/testing/selftests/mm/ |
| mremap_dontunmap.c |
     45  unsigned long num_pages = 1;  in kernel_support_for_mremap_dontunmap() [local]
     46  void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE,  in kernel_support_for_mremap_dontunmap()
     53  mremap(source_mapping, num_pages * page_size, num_pages * page_size,  in kernel_support_for_mremap_dontunmap()
     58  BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,  in kernel_support_for_mremap_dontunmap()
     62  BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,  in kernel_support_for_mremap_dontunmap()
     78  unsigned long num_pages = size / page_size;  in check_region_contains_byte() [local]
     82  for (i = 0; i < num_pages; ++i) {  in check_region_contains_byte()
     97  unsigned long num_pages = 5;  in mremap_dontunmap_simple() [local]
    100  mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,  in mremap_dontunmap_simple()
    104  memset(source_mapping, 'a', num_pages * page_size);  in mremap_dontunmap_simple()
    [all …]
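The kernel_support_for_mremap_dontunmap() hits probe whether mremap() accepts MREMAP_DONTUNMAP. A minimal userspace sketch of the same probe (not the selftest itself; the fallback flag define is an assumption for pre-5.7 headers):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4	/* from <linux/mman.h>, Linux 5.7+ */
#endif

int main(void)
{
    size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
    size_t len = page_size;	/* one page, like the kernel-support probe */

    void *src = mmap(NULL, len, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (src == MAP_FAILED)
        return 1;

    /* MREMAP_DONTUNMAP must be combined with MREMAP_MAYMOVE. */
    void *dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
    if (dst == MAP_FAILED) {
        printf("kernel lacks MREMAP_DONTUNMAP support\n");
        munmap(src, len);
        return 0;
    }

    /* Both the old and the new range must now be unmapped. */
    munmap(dst, len);
    munmap(src, len);
    printf("MREMAP_DONTUNMAP supported\n");
    return 0;
}
```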
|
| /linux/drivers/gpu/drm/vmwgfx/ |
| vmwgfx_gmr.c |
     39  unsigned long num_pages,  in vmw_gmr2_bind() [argument]
     47  uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);  in vmw_gmr2_bind()
     48  uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;  in vmw_gmr2_bind()
     58  define_cmd.numPages = num_pages;  in vmw_gmr2_bind()
     73  while (num_pages > 0) {  in vmw_gmr2_bind()
     74  unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP);  in vmw_gmr2_bind()
     94  num_pages -= nr;  in vmw_gmr2_bind()
    128  unsigned long num_pages,  in vmw_gmr_bind() [argument]
    141  return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);  in vmw_gmr_bind()
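The vmw_gmr2_bind() hits show a common batching idiom: compute the number of fixed-size remap commands by ceiling division, then consume the page list in min()-sized chunks. A standalone sketch of that arithmetic (illustrative names and batch size, not the real vmwgfx values):

```c
#include <stdio.h>

#define PPN_PER_BATCH 514	/* assumed batch capacity, standing in for VMW_PPN_PER_REMAP */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned long num_pages = 1300;
    /* Ceiling division written the same way as in vmw_gmr2_bind(). */
    unsigned long batches = num_pages / PPN_PER_BATCH +
                            ((num_pages % PPN_PER_BATCH) > 0);
    unsigned long issued = 0;

    while (num_pages > 0) {
        unsigned long nr = min_ul(num_pages, PPN_PER_BATCH);

        /* ...one remap command covering 'nr' PFNs would be emitted here... */
        issued++;
        num_pages -= nr;
    }
    printf("expected %lu batches, issued %lu\n", batches, issued);
    return 0;
}
```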
|
| vmwgfx_ttm_buffer.c |
     70  return ++(viter->i) < viter->num_pages;  in __vmw_piter_non_sg_next()
    107  viter->num_pages = vsgt->num_pages;  in vmw_piter_start()
    184  vsgt->num_pages = vmw_tt->dma_ttm.num_pages;  in vmw_ttm_map_dma()
    196  vsgt->pages, vsgt->num_pages, 0,  in vmw_ttm_map_dma()
    197  (unsigned long)vsgt->num_pages << PAGE_SHIFT,  in vmw_ttm_map_dma()
    296  ttm->num_pages, vmw_be->gmr_id);  in vmw_ttm_bind()
    301  vmw_mob_create(ttm->num_pages);  in vmw_ttm_bind()
    307  &vmw_be->vsgt, ttm->num_pages,  in vmw_ttm_bind()
    373  ttm->num_pages);  in vmw_ttm_populate()
|
| /linux/drivers/xen/ |
| xen-front-pgdir-shbuf.c |
    160  return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);  in get_num_pages_dir()
    187  buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;  in guest_calc_num_grefs()
    208  unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),  in backend_unmap()
    213  for (i = 0; i < buf->num_pages; i++) {  in backend_unmap()
    222  buf->num_pages);  in backend_unmap()
    224  for (i = 0; i < buf->num_pages; i++) {  in backend_unmap()
    253  map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);  in backend_map()
    257  buf->backend_map_handles = kcalloc(buf->num_pages,  in backend_map()
    271  grefs_left = buf->num_pages;  in backend_map()
    295  ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);  in backend_map()
    [all …]
|
| /linux/tools/testing/scatterlist/ |
| main.c |
     10  unsigned num_pages;  [member]
     40  printf("%u input PFNs:", test->num_pages);  in fail()
     41  for (i = 0; i < test->num_pages; i++)  in fail()
     87  int left_pages = test->pfn_app ? test->num_pages : 0;  in main()
     92  set_pages(pages, test->pfn, test->num_pages);  in main()
     96  &append, pages, test->num_pages, 0, test->size,  in main()
    100  &append.sgt, pages, test->num_pages, 0,  in main()
    109  set_pages(pages, test->pfn_app, test->num_pages);  in main()
    111  &append, pages, test->num_pages, 0, test->size,  in main()
|
| /linux/net/ceph/ |
| pagevec.c |
     13  void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)  in ceph_put_page_vector() [argument]
     17  for (i = 0; i < num_pages; i++) {  in ceph_put_page_vector()
     26  void ceph_release_page_vector(struct page **pages, int num_pages)  in ceph_release_page_vector() [argument]
     30  for (i = 0; i < num_pages; i++)  in ceph_release_page_vector()
     39  struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)  in ceph_alloc_page_vector() [argument]
     44  pages = kmalloc_array(num_pages, sizeof(*pages), flags);  in ceph_alloc_page_vector()
     47  for (i = 0; i < num_pages; i++) {  in ceph_alloc_page_vector()
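The ceph_alloc_page_vector()/ceph_release_page_vector() hits show the usual allocate-then-unwind pattern for a vector of num_pages pages. A userspace analogue as a sketch (hypothetical helper names; posix_memalign() standing in for alloc_page()):

```c
#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>

/* Free every page buffer, then the pointer array itself. */
void release_page_vector(void **pages, int num_pages)
{
    for (int i = 0; i < num_pages; i++)
        free(pages[i]);
    free(pages);
}

/* Allocate num_pages page-sized buffers, unwinding on any failure. */
void **alloc_page_vector(int num_pages, size_t page_size)
{
    void **pages = calloc(num_pages, sizeof(*pages));

    if (!pages)
        return NULL;
    for (int i = 0; i < num_pages; i++) {
        if (posix_memalign(&pages[i], page_size, page_size)) {
            /* Unwind only the pages allocated so far, then the array. */
            release_page_vector(pages, i);
            return NULL;
        }
    }
    return pages;
}
```

Calling `alloc_page_vector(16, 4096)` either returns 16 page-sized buffers or NULL with nothing leaked, which is the property the ceph helpers provide to their callers.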
|
| /linux/arch/x86/platform/efi/ |
| memmap.c |
    129  end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;  in efi_memmap_split_count()
    194  end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;  in efi_memmap_insert()
    203  md->num_pages = (m_end - md->phys_addr + 1) >>  in efi_memmap_insert()
    210  md->num_pages = (end - md->phys_addr + 1) >>  in efi_memmap_insert()
    216  md->num_pages = (m_start - md->phys_addr) >>  in efi_memmap_insert()
    224  md->num_pages = (m_end - m_start + 1) >>  in efi_memmap_insert()
    231  md->num_pages = (end - m_end) >>  in efi_memmap_insert()
    238  md->num_pages = (m_start - md->phys_addr) >>  in efi_memmap_insert()
    245  md->num_pages = (end - md->phys_addr + 1) >>  in efi_memmap_insert()
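The efi_memmap hits all convert a descriptor's num_pages into an inclusive byte range with `num_pages << EFI_PAGE_SHIFT` (EFI pages are always 4 KiB). A minimal sketch of that conversion, using a trimmed stand-in struct rather than the real efi_memory_desc_t:

```c
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12	/* EFI pages are fixed at 4 KiB */

struct efi_md {
    uint64_t phys_addr;
    uint64_t num_pages;
};

int main(void)
{
    struct efi_md md = { .phys_addr = 0x100000, .num_pages = 256 };
    uint64_t end = md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT) - 1;

    /* 256 EFI pages = 1 MiB, so the range is [0x100000, 0x1fffff]. */
    printf("range [%#lx, %#lx]\n",
           (unsigned long)md.phys_addr, (unsigned long)end);
    return 0;
}
```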
|
| efi_32.c |
     43  size = md->num_pages << PAGE_SHIFT;  in efi_map_region()
     51  set_memory_uc((unsigned long)va, md->num_pages);  in efi_map_region()
     82  int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)  in efi_setup_page_tables() [argument]
    139  set_memory_x(md->virt_addr, md->num_pages);  in efi_runtime_update_mappings()
|
| /linux/drivers/infiniband/sw/siw/ |
| siw_mem.c |
     44  int i, num_pages = umem->num_pages;  in siw_umem_release() [local]
     49  for (i = 0; num_pages > 0; i++) {  in siw_umem_release()
     51  num_pages -= PAGES_PER_CHUNK;  in siw_umem_release()
    341  int num_pages, num_chunks, i, rv = 0;  in siw_umem_get() [local]
    347  num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT;  in siw_umem_get()
    348  num_chunks = (num_pages >> CHUNK_SHIFT) + 1;  in siw_umem_get()
    376  for (i = 0; num_pages > 0; i++) {  in siw_umem_get()
    377  int nents = min_t(int, num_pages, PAGES_PER_CHUNK);  in siw_umem_get()
    388  umem->num_pages++;  in siw_umem_get()
    389  num_pages--;  in siw_umem_get()
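siw_umem_get() derives num_pages from an arbitrary, possibly unaligned (start, len) user buffer by rounding the start down and the end up to page boundaries. A self-contained sketch of the same arithmetic (a 4 KiB page size is assumed here):

```c
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_SHIFT    12
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long start = 0x1000ff0;	/* 16 bytes before a page boundary */
    unsigned long len = 32;		/* straddles one boundary */

    /* Round the start down, the end up, then count whole pages. */
    unsigned long first_page_va = start & PAGE_MASK;
    unsigned long num_pages =
        PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT;

    /* 32 bytes straddling a boundary touch 2 pages. */
    printf("num_pages = %lu\n", num_pages);
    return 0;
}
```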
|
| /linux/drivers/tee/ |
| tee_shm.c |
     46  unpin_user_pages(shm->pages, shm->num_pages);  in release_registered_pages()
     48  shm_put_kernel_pages(shm->pages, shm->num_pages);  in release_registered_pages()
    356  size_t num_pages,  in tee_dyn_shm_alloc_helper() [argument]
    386  shm->num_pages = nr_pages;  in tee_dyn_shm_alloc_helper()
    425  size_t num_pages, off;  in register_shm_helper() [local]
    453  num_pages = iov_iter_npages(iter, INT_MAX);  in register_shm_helper()
    454  if (!num_pages) {  in register_shm_helper()
    459  shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);  in register_shm_helper()
    465  len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,  in register_shm_helper()
    470  } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {  in register_shm_helper()
    [all …]
|
| /linux/arch/riscv/include/asm/ |
| set_memory.h |
     21  int num_pages))  in set_kernel_memory() [argument]
     25  int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;  in set_kernel_memory() [local]
     27  return set_memory(start, num_pages);  in set_kernel_memory()
     37  int num_pages))  in set_kernel_memory() [argument]
|
| /linux/drivers/hv/ |
| hv_proc.c |
     19  int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)  in hv_call_deposit_pages() [argument]
     32  if (num_pages > HV_DEPOSIT_MAX)  in hv_call_deposit_pages()
     34  if (!num_pages)  in hv_call_deposit_pages()
     52  while (num_pages) {  in hv_call_deposit_pages()
     54  order = 31 - __builtin_clz(num_pages);  in hv_call_deposit_pages()
     70  num_pages -= counts[i];  in hv_call_deposit_pages()
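hv_call_deposit_pages() uses `order = 31 - __builtin_clz(num_pages)`, i.e. floor(log2) of a nonzero 32-bit count, to peel off the largest power-of-two block that still fits in the remaining page count. A sketch of that decomposition (simplified; the real code also caps the order and batches the hypercall):

```c
#include <stdio.h>

int main(void)
{
    unsigned int num_pages = 1000;

    while (num_pages) {
        /* __builtin_clz() is undefined for 0, so the loop condition guards it. */
        unsigned int order = 31 - __builtin_clz(num_pages);
        unsigned int chunk = 1U << order;

        printf("deposit %u pages (order %u)\n", chunk, order);
        num_pages -= chunk;
    }
    return 0;
}
```

For 1000 pages this yields blocks of 512, 256, 128, 64, 32 and 8, each a single allocation order.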
|
| /linux/drivers/gpu/drm/xe/ |
| xe_shrinker.c |
    142  unsigned long num_pages;  in xe_shrinker_count() [local]
    145  num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT;  in xe_shrinker_count()
    149  num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages);  in xe_shrinker_count()
    151  num_pages = 0;  in xe_shrinker_count()
    153  num_pages += shrinker->purgeable_pages;  in xe_shrinker_count()
    156  return num_pages ? num_pages : SHRINK_EMPTY;  in xe_shrinker_count()
|
| /linux/fs/crypto/ |
| bio.c |
     58  int num_pages = 0;  in fscrypt_zeroout_range_inline_crypt() [local]
     68  if (num_pages == 0) {  in fscrypt_zeroout_range_inline_crypt()
     78  num_pages++;  in fscrypt_zeroout_range_inline_crypt()
     82  if (num_pages == BIO_MAX_VECS || !len ||  in fscrypt_zeroout_range_inline_crypt()
     88  num_pages = 0;  in fscrypt_zeroout_range_inline_crypt()
|
| /linux/drivers/gpu/drm/ |
| drm_cache.c |
     67  unsigned long num_pages)  in drm_cache_flush_clflush() [argument]
     72  for (i = 0; i < num_pages; i++)  in drm_cache_flush_clflush()
     87  drm_clflush_pages(struct page *pages[], unsigned long num_pages)  in drm_clflush_pages() [argument]
     92  drm_cache_flush_clflush(pages, num_pages);  in drm_clflush_pages()
    101  for (i = 0; i < num_pages; i++) {  in drm_clflush_pages()
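drm_clflush_pages() flushes CPU caches for each page with clflush when that instruction is available. An x86-only userspace sketch of the same cache-line walk, using the SSE2 intrinsic and an assumed 64-byte line size (real code queries the line size instead):

```c
#include <emmintrin.h>	/* _mm_clflush(), _mm_mfence() */
#include <stdlib.h>

#define CACHELINE 64	/* assumed; query CPUID/sysconf in real code */

/* Flush every cache line covering [addr, addr + len). */
static void flush_range(const void *addr, size_t len)
{
    const char *p = addr;
    const char *end = p + len;

    for (; p < end; p += CACHELINE)
        _mm_clflush(p);
    _mm_mfence();	/* order the flushes, as the drm helpers do with a barrier */
}

int main(void)
{
    char *buf = malloc(4096);

    if (!buf)
        return 1;
    flush_range(buf, 4096);
    free(buf);
    return 0;
}
```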
|
| /linux/drivers/gpu/drm/ttm/tests/ |
| ttm_tt_test.c |
     47  int num_pages = params->size >> PAGE_SHIFT;  in ttm_tt_init_basic() [local]
     58  KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages + extra_pages);  in ttm_tt_init_basic()
     73  int num_pages = (size + SZ_4K) >> PAGE_SHIFT;  in ttm_tt_init_misaligned() [local]
     87  KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages);  in ttm_tt_init_misaligned()
    344  int err, num_pages;  in ttm_tt_swapin_basic() [local]
    358  num_pages = ttm_tt_swapout(devs->ttm_dev, tt, GFP_KERNEL);  in ttm_tt_swapin_basic()
    359  KUNIT_ASSERT_EQ(test, num_pages, expected_num_pages);  in ttm_tt_swapin_basic()
|
| /linux/tools/testing/selftests/cachestat/ |
| test_cachestat.c |
    124  bool test_fsync, unsigned long num_pages,  in test_cachestat() [argument]
    128  int filesize = num_pages * PS;  in test_cachestat()
    165  if (cs.nr_cache + cs.nr_evicted != num_pages) {  in test_cachestat()
    233  unsigned long num_pages = compute_len / PS;  in run_cachestat_test() [local]
    284  if (cs.nr_cache + cs.nr_evicted != num_pages) {  in run_cachestat_test()
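The selftest checks that nr_cache + nr_evicted accounts for every page of the file. A hedged standalone sketch of invoking cachestat(2) and printing those counters; the syscall number and the locally copied uapi struct layouts are assumptions (Linux 6.5+):

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_cachestat
#define __NR_cachestat 451	/* assumed syscall number, Linux 6.5+ */
#endif

/* Local copies of the uapi structs (layout assumed to match <linux/mman.h>). */
struct cachestat_range { uint64_t off; uint64_t len; };
struct cachestat {
    uint64_t nr_cache;
    uint64_t nr_dirty;
    uint64_t nr_writeback;
    uint64_t nr_evicted;
    uint64_t nr_recently_evicted;
};

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/etc/hostname";
    long page_size = sysconf(_SC_PAGESIZE);
    int fd = open(path, O_RDONLY);

    if (fd < 0)
        return 1;

    off_t size = lseek(fd, 0, SEEK_END);
    unsigned long num_pages = (size + page_size - 1) / page_size;
    struct cachestat_range range = { .off = 0, .len = (uint64_t)size };
    struct cachestat cs = { 0 };

    if (syscall(__NR_cachestat, fd, &range, &cs, 0) < 0) {
        perror("cachestat");
        close(fd);
        return 1;
    }
    printf("%lu pages: %llu cached, %llu evicted\n", num_pages,
           (unsigned long long)cs.nr_cache,
           (unsigned long long)cs.nr_evicted);
    close(fd);
    return 0;
}
```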
|
| /linux/tools/mm/ |
| thp_swap_allocator_test.c |
     68  size_t num_pages = total_dontneed_size / align_size;  in random_madvise_dontneed() [local]
     73  for (i = 0; i < num_pages; ++i) {  in random_madvise_dontneed()
     86  size_t num_pages = total_swapin_size / align_size;  in random_swapin() [local]
     91  for (i = 0; i < num_pages; ++i) {  in random_swapin()
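random_madvise_dontneed() walks num_pages aligned chunks and drops them with MADV_DONTNEED. A simplified, deterministic userspace sketch of that pattern (64 KiB chunks standing in for the test's mTHP-sized align_size):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#define ALIGN_SZ (64UL * 1024)		/* assumed chunk size */
#define TOTAL_SZ (16UL * ALIGN_SZ)

int main(void)
{
    char *buf = mmap(NULL, TOTAL_SZ, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    /* Touch everything so the pages are actually populated. */
    for (size_t i = 0; i < TOTAL_SZ; i += 4096)
        buf[i] = 'a';

    /* Drop every other chunk; the selftest picks chunks at random instead. */
    size_t num_chunks = TOTAL_SZ / ALIGN_SZ;
    for (size_t i = 0; i < num_chunks; i += 2) {
        if (madvise(buf + i * ALIGN_SZ, ALIGN_SZ, MADV_DONTNEED))
            perror("madvise");
    }

    munmap(buf, TOTAL_SZ);
    return 0;
}
```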
|
| /linux/drivers/gpu/drm/i915/gt/ |
| selftest_reset.c |
     24  resource_size_t num_pages, page;  in __igt_reset_stolen() [local]
     37  num_pages = resource_size(dsm) >> PAGE_SHIFT;  in __igt_reset_stolen()
     38  if (!num_pages)  in __igt_reset_stolen()
     41  crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);  in __igt_reset_stolen()
     82  for (page = 0; page < num_pages; page++) {  in __igt_reset_stolen()
    124  for (page = 0; page < num_pages; page++) {  in __igt_reset_stolen()
|
| /linux/include/crypto/ |
| scatterwalk.h |
    189  unsigned int num_pages;  in __scatterwalk_flush_dcache_pages() [local]
    198  num_pages = nbytes / PAGE_SIZE;  in __scatterwalk_flush_dcache_pages()
    199  num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);  in __scatterwalk_flush_dcache_pages()
    201  for (unsigned int i = 0; i < num_pages; i++)  in __scatterwalk_flush_dcache_pages()
|
| /linux/include/xen/ |
| xen-front-pgdir-shbuf.h |
     42  int num_pages;  [member]
     62  int num_pages;  [member]
|
| /linux/drivers/gpu/drm/ttm/ |
| ttm_bo_util.c |
     89  u32 num_pages,  in ttm_move_memcpy() [argument]
    104  for (i = 0; i < num_pages; ++i) {  in ttm_move_memcpy()
    116  for (i = 0; i < num_pages; ++i) {  in ttm_move_memcpy()
    342  unsigned long num_pages,  in ttm_bo_kmap_ttm() [argument]
    362  if (num_pages == 1 && ttm->caching == ttm_cached &&  in ttm_bo_kmap_ttm()
    379  map->virtual = vmap(ttm->pages + start_page, num_pages,  in ttm_bo_kmap_ttm()
    428  unsigned long start_page, unsigned long num_pages,  in ttm_bo_kmap() [argument]
    436  if (num_pages > PFN_UP(bo->resource->size))  in ttm_bo_kmap()
    438  if ((start_page + num_pages) > PFN_UP(bo->resource->size))  in ttm_bo_kmap()
    445  return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);  in ttm_bo_kmap()
    [all …]
|
| /linux/drivers/misc/vmw_vmci/ |
| vmci_queue_pair.c |
    128  size_t num_pages; /* Number of pages incl. header. */  [member]
    278  u64 num_pages;  in qp_alloc_queue() [local]
    282  num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;  in qp_alloc_queue()
    283  if (num_pages >  in qp_alloc_queue()
    289  pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);  in qp_alloc_queue()
    290  vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);  in qp_alloc_queue()
    301  queue->kernel_if->num_pages = num_pages;  in qp_alloc_queue()
    307  for (i = 0; i < num_pages; i++) {  in qp_alloc_queue()
    530  u64 num_pages;  in qp_host_alloc_queue() [local]
    535  num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;  in qp_host_alloc_queue()
    [all …]
|
| /linux/drivers/virt/ |
| fsl_hypervisor.c |
    155  unsigned int num_pages;  in ioctl_memcpy() [local]
    221  num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;  in ioctl_memcpy()
    229  pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);  in ioctl_memcpy()
    239  sg_list_unaligned = kmalloc(num_pages * sizeof(struct fh_sg_list) +  in ioctl_memcpy()
    250  num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);  in ioctl_memcpy()
    252  if (num_pinned != num_pages) {  in ioctl_memcpy()
    274  for (i = 1; i < num_pages; i++) {  in ioctl_memcpy()
    291  virt_to_phys(sg_list), num_pages);  in ioctl_memcpy()
|
| /linux/drivers/gpu/drm/nouveau/ |
| nouveau_mem.c |
    195  u32 num_pages = PFN_UP(size);  in nouveau_mem_intersects() [local]
    198  if (place->fpfn >= (res->start + num_pages) ||  in nouveau_mem_intersects()
    210  u32 num_pages = PFN_UP(size);  in nouveau_mem_compatible() [local]
    213  (place->lpfn && (res->start + num_pages) > place->lpfn))  in nouveau_mem_compatible()
|