| /linux/tools/testing/selftests/mm/ |
| H A D | hmm-tests.c |
  184  unsigned long npages)    in hmm_dmirror_cmd() argument
  192  cmd.npages = npages;    in hmm_dmirror_cmd()
  275  unsigned long npages)    in hmm_migrate_sys_to_dev() argument
  277  return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);    in hmm_migrate_sys_to_dev()
  282  unsigned long npages)    in hmm_migrate_dev_to_sys() argument
  284  return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);    in hmm_migrate_dev_to_sys()
  300  unsigned long npages;    in TEST_F() local
  307  npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;    in TEST_F()
  308  ASSERT_NE(npages, 0);    in TEST_F()
  309  size = npages << self->page_shift;    in TEST_F()
  [all …]
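The TEST_F() matches above round a buffer up to whole pages and convert between byte and page counts. A minimal userspace sketch of that arithmetic, with ALIGN() re-declared locally and a made-up HMM_BUFFER_SIZE (the selftest defines its own value):

```c
#include <assert.h>
#include <stdio.h>
#include <unistd.h>

/* Round x up to a multiple of a (a power of two), like the kernel's ALIGN(). */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical buffer size, chosen so it is not a page multiple. */
#define HMM_BUFFER_SIZE	(1024UL * 1024UL + 123UL)

int main(void)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned int page_shift = (unsigned int)__builtin_ctzl(page_size);
	unsigned long npages, size;

	/* Byte count -> page count, rounding up to a whole page. */
	npages = ALIGN(HMM_BUFFER_SIZE, page_size) >> page_shift;
	assert(npages != 0);

	/* Page count -> byte count actually covered. */
	size = npages << page_shift;
	assert(size >= HMM_BUFFER_SIZE && size - HMM_BUFFER_SIZE < page_size);

	printf("npages=%lu size=%lu\n", npages, size);
	return 0;
}
```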
|
| /linux/drivers/gpu/drm/ |
| H A D | drm_pagemap.c |
  171  static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,    in drm_pagemap_migration_unlock_put_pages() argument
  176  for (i = 0; i < npages; ++i) {    in drm_pagemap_migration_unlock_put_pages()
  221  unsigned long npages,    in drm_pagemap_migrate_map_pages() argument
  226  for (i = 0; i < npages;) {    in drm_pagemap_migrate_map_pages()
  270  unsigned long npages,    in drm_pagemap_migrate_unmap_pages() argument
  275  for (i = 0; i < npages;) {    in drm_pagemap_migrate_unmap_pages()
  331  unsigned long i, npages = npages_in_range(start, end);    in drm_pagemap_migrate_to_devmem() local
  361  buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +    in drm_pagemap_migrate_to_devmem()
  367  pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);    in drm_pagemap_migrate_to_devmem()
  368  pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;    in drm_pagemap_migrate_to_devmem()
  [all …]
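drm_pagemap_migrate_to_devmem() makes one kvcalloc() allocation and carves it into the migrate src/dst arrays, a DMA-address array, and a page-pointer array using npages-scaled offsets. A userspace sketch of the same carving, with stand-in typedefs for the kernel types (migrate_pfn_t and pagemap_addr_t are invented names for illustration):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct page;				/* opaque stand-in for the kernel's struct page */
typedef uint64_t migrate_pfn_t;		/* stand-in for the src/dst entry type */
typedef uint64_t pagemap_addr_t;	/* stand-in for the DMA-address entry type */

int main(void)
{
	unsigned long npages = 32;

	/* One zeroed allocation sized for src[npages], dst[npages],
	 * pagemap_addr[npages] and pages[npages]. */
	void *buf = calloc(npages, 2 * sizeof(migrate_pfn_t) +
				   sizeof(pagemap_addr_t) + sizeof(struct page *));
	if (!buf)
		return 1;

	migrate_pfn_t *src = buf;
	migrate_pfn_t *dst = src + npages;
	pagemap_addr_t *pagemap_addr = (void *)(src + 2 * npages);
	struct page **pages = (void *)((char *)buf +
			(2 * sizeof(migrate_pfn_t) + sizeof(pagemap_addr_t)) * npages);

	printf("src=%p dst=%p pagemap_addr=%p pages=%p\n",
	       (void *)src, (void *)dst, (void *)pagemap_addr, (void *)pages);
	free(buf);
	return 0;
}
```

The single allocation keeps the four per-page arrays together, so one free tears the whole thing down.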
|
| /linux/arch/sparc/kernel/ |
| H A D | iommu.c |
  158  unsigned long npages)    in alloc_npages() argument
  162  entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,    in alloc_npages()
  204  int npages, nid;    in dma_4u_alloc_coherent() local
  233  npages = size >> IO_PAGE_SHIFT;    in dma_4u_alloc_coherent()
  235  while (npages--) {    in dma_4u_alloc_coherent()
  251  unsigned long order, npages;    in dma_4u_free_coherent() local
  253  npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;    in dma_4u_free_coherent()
  256  iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);    in dma_4u_free_coherent()
  270  unsigned long flags, npages, oaddr;    in dma_4u_map_phys() local
  292  npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);    in dma_4u_map_phys()
  [all …]
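dma_4u_map_phys() computes how many IO pages a buffer spanning [oaddr, oaddr + sz) touches by aligning the end address up and masking the start address down; shifting that span right by IO_PAGE_SHIFT then gives the page count. A self-contained sketch of the calculation, assuming an 8 KiB IO page:

```c
#include <stdio.h>

#define IO_PAGE_SHIFT	13UL				/* assumed 8 KiB IOMMU page */
#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK	(~(IO_PAGE_SIZE - 1))
#define IO_PAGE_ALIGN(x)	(((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

/* Number of IO pages touched by sz bytes starting at oaddr. */
static unsigned long io_npages(unsigned long oaddr, unsigned long sz)
{
	return (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK)) >> IO_PAGE_SHIFT;
}

int main(void)
{
	/* A 100-byte buffer that straddles an 8 KiB boundary needs two IO pages. */
	printf("%lu\n", io_npages(8 * 1024 - 10, 100));	/* prints 2 */
	return 0;
}
```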
|
| H A D | pci_sun4v.c |
   61  unsigned long npages; /* Number of pages in list. */    member
   75  p->npages = 0;    in iommu_batch_start()
   92  unsigned long npages = p->npages;    in iommu_batch_flush() local
  101  while (npages != 0) {    in iommu_batch_flush()
  105  npages,    in iommu_batch_flush()
  113  npages, prot, __pa(pglist),    in iommu_batch_flush()
  118  index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),    in iommu_batch_flush()
  136  npages -= num;    in iommu_batch_flush()
  141  p->npages = 0;    in iommu_batch_flush()
  150  if (p->entry + p->npages == entry)    in iommu_batch_new_entry()
  [all …]
|
| /linux/drivers/infiniband/hw/hfi1/ |
| H A D | pin_system.c |
   20  unsigned int npages;    member
   55  static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)    in sdma_cache_evict() argument
   60  evict_data.target = npages;    in sdma_cache_evict()
   66  unsigned int start, unsigned int npages)    in unpin_vector_pages() argument
   68  hfi1_release_user_pages(mm, pages + start, npages, false);    in unpin_vector_pages()
   79  if (node->npages) {    in free_system_node()
   81  node->npages);    in free_system_node()
   82  atomic_sub(node->npages, &node->pq->n_locked);    in free_system_node()
  116  struct sdma_mmu_node *node, int npages)    in pin_system_pages() argument
  122  pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);    in pin_system_pages()
  [all …]
|
| H A D | user_pages.c |
   30  u32 nlocked, u32 npages)    in hfi1_can_pin_pages() argument
   47  if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)    in hfi1_can_pin_pages()
   66  if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))    in hfi1_can_pin_pages()
   74  if (nlocked + npages > cache_limit_pages)    in hfi1_can_pin_pages()
   80  int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,    in hfi1_acquire_user_pages() argument
   86  ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);    in hfi1_acquire_user_pages()
   96  size_t npages, bool dirty)    in hfi1_release_user_pages() argument
   98  unpin_user_pages_dirty_lock(p, npages, dirty);    in hfi1_release_user_pages()
  101  atomic64_sub(npages, &mm->pinned_vm);    in hfi1_release_user_pages()
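hfi1_can_pin_pages() compares the mm's pinned_vm counter plus the requested npages against the memlock limit before hfi1_acquire_user_pages() pins anything, and hfi1_release_user_pages() subtracts the count again. A toy userspace model of that accounting (the struct and helper names below are invented; the real code uses pin_user_pages_fast() and unpin_user_pages_dirty_lock()):

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the per-mm pinned-page accounting the driver checks against. */
struct mm_model {
	unsigned long pinned_vm;	/* pages already pinned */
	unsigned long ulimit_pages;	/* RLIMIT_MEMLOCK expressed in pages */
};

static bool can_pin_pages(struct mm_model *mm, unsigned long npages)
{
	return mm->pinned_vm + npages <= mm->ulimit_pages;
}

static bool acquire_pages(struct mm_model *mm, unsigned long npages)
{
	if (!can_pin_pages(mm, npages))
		return false;
	/* real code: pin_user_pages_fast(vaddr, npages, gup_flags, pages) */
	mm->pinned_vm += npages;
	return true;
}

static void release_pages(struct mm_model *mm, unsigned long npages)
{
	/* real code: unpin_user_pages_dirty_lock(pages, npages, dirty) */
	mm->pinned_vm -= npages;
}

int main(void)
{
	struct mm_model mm = { .pinned_vm = 0, .ulimit_pages = 16 };

	printf("%d\n", acquire_pages(&mm, 10));	/* 1: fits under the limit */
	printf("%d\n", acquire_pages(&mm, 10));	/* 0: would exceed ulimit_pages */
	release_pages(&mm, 10);
	return 0;
}
```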
|
| H A D | user_exp_rcv.c |
   16  static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
   20  u16 pageidx, unsigned int npages);
  136  unsigned int npages,    in unpin_rcv_pages() argument
  145  node->npages * PAGE_SIZE, DMA_FROM_DEVICE);    in unpin_rcv_pages()
  152  hfi1_release_user_pages(mm, pages, npages, mapped);    in unpin_rcv_pages()
  153  fd->tid_n_pinned -= npages;    in unpin_rcv_pages()
  162  unsigned int npages = tidbuf->npages;    in pin_rcv_pages() local
  167  if (npages > fd->uctxt->expected_count) {    in pin_rcv_pages()
  173  pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);    in pin_rcv_pages()
  182  if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {    in pin_rcv_pages()
  [all …]
|
| /linux/drivers/infiniband/hw/bng_re/ |
| H A D | bng_res.c |
   75  pages = sginfo->npages;    in bng_alloc_pbl()
  131  u32 npages, pg_size;    in bng_re_alloc_init_hwq() local
  147  npages = (depth * stride) / pg_size;    in bng_re_alloc_init_hwq()
  149  npages++;    in bng_re_alloc_init_hwq()
  150  if (!npages)    in bng_re_alloc_init_hwq()
  152  hwq_attr->sginfo->npages = npages;    in bng_re_alloc_init_hwq()
  154  if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {    in bng_re_alloc_init_hwq()
  163  if (npages >= MAX_PBL_LVL_0_PGS) {    in bng_re_alloc_init_hwq()
  164  if (npages > MAX_PBL_LVL_1_PGS) {    in bng_re_alloc_init_hwq()
  167  npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;    in bng_re_alloc_init_hwq()
  [all …]
|
| /linux/drivers/iommu/iommufd/ |
| H A D | iova_bitmap.c |
   48  unsigned long npages;    member
  168  unsigned long npages;    in iova_bitmap_get() local
  178  npages = DIV_ROUND_UP((bitmap->mapped_total_index -    in iova_bitmap_get()
  192  npages = min(npages + !!offset_in_page(addr),    in iova_bitmap_get()
  195  ret = pin_user_pages_fast((unsigned long)addr, npages,    in iova_bitmap_get()
  200  mapped->npages = (unsigned long)ret;    in iova_bitmap_get()
  223  if (mapped->npages) {    in iova_bitmap_put()
  224  unpin_user_pages(mapped->pages, mapped->npages);    in iova_bitmap_put()
  225  mapped->npages = 0;    in iova_bitmap_put()
  306  bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff;    in iova_bitmap_mapped_remaining()
  [all …]
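iova_bitmap_get() sizes its pin with DIV_ROUND_UP() and adds one extra page when the user address is not page aligned, then clamps the result before calling pin_user_pages_fast(). A sketch of that sizing, with local stand-ins for the kernel macros and made-up values:

```c
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x1000 + 100;	/* user pointer, not page aligned */
	unsigned long bytes = 2 * PAGE_SIZE;	/* bitmap bytes still to cover */
	unsigned long max_npages = 512;		/* cap on pages pinned per iteration */

	unsigned long npages = DIV_ROUND_UP(bytes, PAGE_SIZE);

	/* A misaligned start can spill the data onto one extra page. */
	npages = npages + !!offset_in_page(addr);
	if (npages > max_npages)
		npages = max_npages;

	printf("npages=%lu\n", npages);	/* 3: two pages of data plus the spill page */
	return 0;
}
```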
|
| H A D | pages.c |
  167  static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)    in iopt_pages_add_npinned() argument
  171  rc = check_add_overflow(pages->npinned, npages, &pages->npinned);    in iopt_pages_add_npinned()
  173  WARN_ON(rc || pages->npinned > pages->npages);    in iopt_pages_add_npinned()
  176  static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)    in iopt_pages_sub_npinned() argument
  180  rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);    in iopt_pages_sub_npinned()
  182  WARN_ON(rc || pages->npinned > pages->npages);    in iopt_pages_sub_npinned()
  190  unsigned long npages = last_index - start_index + 1;    in iopt_pages_err_unpin() local
  192  unpin_user_pages(page_list, npages);    in iopt_pages_err_unpin()
  193  iopt_pages_sub_npinned(pages, npages);    in iopt_pages_err_unpin()
  653  size_t npages)    in batch_from_pages() argument
  [all …]
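iopt_pages_add_npinned()/iopt_pages_sub_npinned() maintain the pinned-page counter with overflow-checked arithmetic and warn if it ever exceeds npages. A userspace model using the GCC/Clang builtins that the kernel's check_add_overflow()/check_sub_overflow() wrap (the struct and function names below are invented for illustration):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pages_model {
	size_t npinned;		/* pages currently pinned */
	size_t npages;		/* total pages tracked; npinned must never exceed this */
};

static void pages_add_npinned(struct pages_model *p, size_t npages)
{
	bool overflow = __builtin_add_overflow(p->npinned, npages, &p->npinned);

	if (overflow || p->npinned > p->npages)
		fprintf(stderr, "pin accounting corrupted\n");	/* kernel: WARN_ON() */
}

static void pages_sub_npinned(struct pages_model *p, size_t npages)
{
	bool overflow = __builtin_sub_overflow(p->npinned, npages, &p->npinned);

	if (overflow || p->npinned > p->npages)
		fprintf(stderr, "pin accounting corrupted\n");
}

int main(void)
{
	struct pages_model p = { .npinned = 0, .npages = 8 };

	pages_add_npinned(&p, 5);
	pages_sub_npinned(&p, 5);
	pages_sub_npinned(&p, 1);	/* underflows: triggers the warning path */
	printf("npinned=%zu\n", p.npinned);
	return 0;
}
```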
|
| /linux/drivers/gpu/drm/amd/amdkfd/ |
| H A D | kfd_migrate.c |
   48  svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,    in svm_migrate_gart_map() argument
   64  num_bytes = npages * 8;    in svm_migrate_gart_map()
   93  amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);    in svm_migrate_gart_map()
  125  u64 *vram, u64 npages,    in svm_migrate_copy_memory_gart() argument
  138  while (npages) {    in svm_migrate_copy_memory_gart()
  139  size = min(GTT_MAX_PAGES, npages);    in svm_migrate_copy_memory_gart()
  164  npages -= size;    in svm_migrate_copy_memory_gart()
  165  if (npages) {    in svm_migrate_copy_memory_gart()
  268  for (i = 0; i < migrate->npages; i++) {    in svm_migrate_successful_pages()
  281  u64 npages = migrate->npages;    in svm_migrate_copy_to_vram() local
  [all …]
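svm_migrate_copy_memory_gart() walks npages in chunks of at most GTT_MAX_PAGES, subtracting each chunk until nothing is left. A minimal sketch of that chunking loop, with a made-up chunk limit standing in for the driver's constant:

```c
#include <stdio.h>

#define GTT_MAX_PAGES	64UL	/* hypothetical chunk limit; the driver defines its own */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Process npages worth of work in chunks no larger than GTT_MAX_PAGES. */
static void copy_in_chunks(unsigned long npages)
{
	while (npages) {
		unsigned long size = min_ul(GTT_MAX_PAGES, npages);

		printf("copy chunk of %lu pages\n", size);
		npages -= size;
		if (npages)
			printf("  %lu pages still to go\n", npages);
	}
}

int main(void)
{
	copy_in_chunks(150);	/* 64 + 64 + 22 */
	return 0;
}
```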
|
| /linux/arch/powerpc/kernel/ |
| H A D | iommu.c |
  217  unsigned long npages,    in iommu_range_alloc() argument
  224  int largealloc = npages > 15;    in iommu_range_alloc()
  236  if (unlikely(npages == 0)) {    in iommu_range_alloc()
  290  n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,    in iommu_range_alloc()
  326  end = n + npages;    in iommu_range_alloc()
  348  void *page, unsigned int npages,    in iommu_alloc() argument
  357  entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);    in iommu_alloc()
  366  build_fail = tbl->it_ops->set(tbl, entry, npages,    in iommu_alloc()
  376  __iommu_free(tbl, ret, npages);    in iommu_alloc()
  391  unsigned int npages)    in iommu_free_check() argument
  [all …]
|
| /linux/mm/ |
| H A D | migrate_device.c |
   29  migrate->dst[migrate->npages] = 0;    in migrate_vma_collect_skip()
   30  migrate->src[migrate->npages++] = 0;    in migrate_vma_collect_skip()
   52  migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE |    in migrate_vma_collect_hole()
   54  migrate->dst[migrate->npages] = 0;    in migrate_vma_collect_hole()
   55  migrate->npages++;    in migrate_vma_collect_hole()
   66  migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;    in migrate_vma_collect_hole()
   67  migrate->dst[migrate->npages] = 0;    in migrate_vma_collect_hole()
   68  migrate->npages++;    in migrate_vma_collect_hole()
  212  migrate->src[migrate->npages] = migrate_pfn(pfn) | write    in migrate_vma_collect_huge_pmd()
  215  migrate->dst[migrate->npages++] = 0;    in migrate_vma_collect_huge_pmd()
  [all …]
|
| H A D | hmm.c |
  132  const unsigned long hmm_pfns[], unsigned long npages,    in hmm_range_need_fault() argument
  148  for (i = 0; i < npages; ++i) {    in hmm_range_need_fault()
  163  unsigned long i, npages;    in hmm_vma_walk_hole() local
  167  npages = (end - addr) >> PAGE_SHIFT;    in hmm_vma_walk_hole()
  170  hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);    in hmm_vma_walk_hole()
  203  unsigned long pfn, npages, i;    in hmm_vma_handle_pmd() local
  207  npages = (end - addr) >> PAGE_SHIFT;    in hmm_vma_handle_pmd()
  210  hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);    in hmm_vma_handle_pmd()
  341  unsigned long npages = (end - start) >> PAGE_SHIFT;    in hmm_vma_handle_absent_pmd() local
  371  npages, 0);    in hmm_vma_handle_absent_pmd()
  [all …]
|
| /linux/drivers/infiniband/hw/mthca/ |
| H A D | mthca_memfree.c |
   69  dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,    in mthca_free_icm_pages()
   72  for (i = 0; i < chunk->npages; ++i)    in mthca_free_icm_pages()
   81  for (i = 0; i < chunk->npages; ++i) {    in mthca_free_icm_coherent()
  137  struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,    in mthca_alloc_icm() argument
  157  while (npages > 0) {    in mthca_alloc_icm()
  165  chunk->npages = 0;    in mthca_alloc_icm()
  170  while (1 << cur_order > npages)    in mthca_alloc_icm()
  175  &chunk->mem[chunk->npages],    in mthca_alloc_icm()
  178  ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],    in mthca_alloc_icm()
  182  ++chunk->npages;    in mthca_alloc_icm()
  [all …]
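mthca_alloc_icm() fills each chunk with power-of-two page allocations, shrinking cur_order whenever 2^cur_order would overshoot the pages still needed (the real code also drops the order when an allocation fails). A toy version of the order-reduction loop, with an illustrative starting order:

```c
#include <stdio.h>

#define MAX_ORDER_SKETCH	3	/* start by trying 8-page chunks; illustrative only */

/* Carve npages into power-of-two chunks, never overshooting what is left. */
static void alloc_in_chunks(int npages)
{
	int cur_order = MAX_ORDER_SKETCH;

	while (npages > 0) {
		/* Drop the order until the chunk fits the remaining pages. */
		while (1 << cur_order > npages)
			--cur_order;

		printf("allocate a %d-page chunk\n", 1 << cur_order);
		npages -= 1 << cur_order;
	}
}

int main(void)
{
	alloc_in_chunks(13);	/* 8 + 4 + 1 */
	return 0;
}
```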
|
| H A D | mthca_allocator.c |
  195  int npages, shift;    in mthca_buf_alloc() local
  202  npages = 1;    in mthca_buf_alloc()
  214  npages *= 2;    in mthca_buf_alloc()
  217  dma_list = kmalloc_array(npages, sizeof(*dma_list),    in mthca_buf_alloc()
  222  for (i = 0; i < npages; ++i)    in mthca_buf_alloc()
  226  npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;    in mthca_buf_alloc()
  229  dma_list = kmalloc_array(npages, sizeof(*dma_list),    in mthca_buf_alloc()
  234  buf->page_list = kmalloc_array(npages,    in mthca_buf_alloc()
  240  for (i = 0; i < npages; ++i)    in mthca_buf_alloc()
  243  for (i = 0; i < npages; ++i) {    in mthca_buf_alloc()
  [all …]
|
| /linux/arch/x86/mm/ |
| H A D | cpu_entry_area.c |
  108  unsigned int npages;    in percpu_setup_debug_store() local
  115  npages = sizeof(struct debug_store) / PAGE_SIZE;    in percpu_setup_debug_store()
  117  cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,    in percpu_setup_debug_store()
  125  npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;    in percpu_setup_debug_store()
  126  for (; npages; npages--, cea += PAGE_SIZE)    in percpu_setup_debug_store()
  134  npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
  136  estacks->name## _stack, npages, PAGE_KERNEL); \
  143  unsigned int npages;    in percpu_setup_exception_stacks() local
|
| /linux/drivers/infiniband/core/ |
| H A D | ib_core_uverbs.c |
  141  pgoff, entry->npages);    in rdma_user_mmap_entry_get_pgoff()
  171  if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {    in rdma_user_mmap_entry_get()
  191  for (i = 0; i < entry->npages; i++)    in rdma_user_mmap_entry_free()
  196  entry->start_pgoff, entry->npages);    in rdma_user_mmap_entry_free()
  269  u32 xa_first, xa_last, npages;    in rdma_user_mmap_entry_insert_range() local
  290  npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);    in rdma_user_mmap_entry_insert_range()
  291  entry->npages = npages;    in rdma_user_mmap_entry_insert_range()
  301  if (check_add_overflow(xa_first, npages, &xa_last))    in rdma_user_mmap_entry_insert_range()
  328  entry->start_pgoff, npages);    in rdma_user_mmap_entry_insert_range()
|
| H A D | umem.c |
  174  unsigned long npages;    in ib_umem_get() local
  213  npages = ib_umem_num_pages(umem);    in ib_umem_get()
  214  if (npages == 0 || npages > UINT_MAX) {    in ib_umem_get()
  221  new_pinned = atomic64_add_return(npages, &mm->pinned_vm);    in ib_umem_get()
  223  atomic64_sub(npages, &mm->pinned_vm);    in ib_umem_get()
  233  while (npages) {    in ib_umem_get()
  236  min_t(unsigned long, npages,    in ib_umem_get()
  246  npages -= pinned;    in ib_umem_get()
  250  npages, GFP_KERNEL);    in ib_umem_get()
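ib_umem_get() charges npages to mm->pinned_vm up front with atomic64_add_return() and backs the charge out if the new total is over the lock limit, then pins in batches, decrementing npages by the amount actually pinned. A userspace sketch of the charge-and-roll-back step using C11 atomics (the limit check shown is a simplified assumption, not the driver's exact policy):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Optimistically charge the pages, then roll back if the new total busts the limit. */
static bool charge_pinned(atomic_ulong *pinned_vm, unsigned long npages,
			  unsigned long lock_limit)
{
	unsigned long new_pinned = atomic_fetch_add(pinned_vm, npages) + npages;

	if (new_pinned > lock_limit) {
		atomic_fetch_sub(pinned_vm, npages);
		return false;
	}
	return true;
}

int main(void)
{
	atomic_ulong pinned_vm = 0;

	printf("%d\n", charge_pinned(&pinned_vm, 100, 128));	/* 1 */
	printf("%d\n", charge_pinned(&pinned_vm, 100, 128));	/* 0: charge rolled back */
	printf("pinned_vm=%lu\n", (unsigned long)atomic_load(&pinned_vm));
	return 0;
}
```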
|
| /linux/tools/testing/selftests/kvm/ |
| H A D | memslot_perf_test.c |
   89  uint64_t npages;
  197  TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,    in vm_gpa2hva()
  210  slotpages = data->npages - slot * data->pages_per_slot;    in vm_gpa2hva()
  268  mempages = data->npages;    in get_max_slots()
  301  data->npages = mempages;    in prepare_vm()
  302  TEST_ASSERT(data->npages > 1, "Can't test without any memory");    in prepare_vm()
  304  data->pages_per_slot = data->npages / data->nslots;    in prepare_vm()
  305  rempages = data->npages % data->nslots;    in prepare_vm()
  320  uint64_t npages;    in prepare_vm()
  322  npages    in prepare_vm()
   88  uint64_t npages;    global() member
  319  uint64_t npages;    prepare_vm() local
  333  uint64_t npages;    prepare_vm() local
  644  uint64_t npages;    test_memslot_do_unmap() local
  [all …]
| /linux/arch/x86/boot/startup/ |
| H A D | sev-startup.c |
   49  unsigned long npages, const struct psc_desc *desc)    in early_set_pages_state() argument
   56  paddr_end = paddr + (npages << PAGE_SHIFT);    in early_set_pages_state()
   67  unsigned long npages)    in early_snp_set_memory_private() argument
   88  early_set_pages_state(vaddr, paddr, npages, &d);    in early_snp_set_memory_private()
   92  unsigned long npages)    in early_snp_set_memory_shared() argument
  110  early_set_pages_state(vaddr, paddr, npages, &d);    in early_snp_set_memory_shared()
|
| /linux/drivers/infiniband/hw/vmw_pvrdma/ |
| H A D | pvrdma_misc.c |
   53  u64 npages, bool alloc_pages)    in pvrdma_page_dir_init() argument
   57  if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)    in pvrdma_page_dir_init()
   67  pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;    in pvrdma_page_dir_init()
   81  pdir->npages = npages;    in pvrdma_page_dir_init()
   84  pdir->pages = kcalloc(npages, sizeof(*pdir->pages),    in pvrdma_page_dir_init()
   89  for (i = 0; i < pdir->npages; i++) {    in pvrdma_page_dir_init()
  127  for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {    in pvrdma_page_dir_cleanup_pages()
  173  if (idx >= pdir->npages)    in pvrdma_page_dir_insert_dma()
  189  if (offset >= pdir->npages)    in pvrdma_page_dir_insert_umem()
  212  if (num_pages > pdir->npages)    in pvrdma_page_dir_insert_page_list()
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| H A D | icm.c |
   60  dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,    in mlx4_free_icm_pages()
   63  for (i = 0; i < chunk->npages; ++i)    in mlx4_free_icm_pages()
   72  for (i = 0; i < chunk->npages; ++i)    in mlx4_free_icm_coherent()
  132  struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,    in mlx4_alloc_icm() argument
  159  while (npages > 0) {    in mlx4_alloc_icm()
  179  while (1 << cur_order > npages)    in mlx4_alloc_icm()
  188  &chunk->buf[chunk->npages],    in mlx4_alloc_icm()
  191  ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],    in mlx4_alloc_icm()
  202  ++chunk->npages;    in mlx4_alloc_icm()
  206  else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {    in mlx4_alloc_icm()
  [all …]
|
| /linux/fs/netfs/ |
| H A D | iterator.c |
   44  unsigned int npages = 0;    in netfs_extract_user_iter() local
   66  while (count && npages < max_pages) {    in netfs_extract_user_iter()
   68  max_pages - npages, extraction_flags,    in netfs_extract_user_iter()
   84  if (npages + cur_npages > max_pages) {    in netfs_extract_user_iter()
   86  npages + cur_npages, max_pages);    in netfs_extract_user_iter()
   92  bvec_set_page(bv + npages + i, *pages++, len - offset, offset);    in netfs_extract_user_iter()
   97  npages += cur_npages;    in netfs_extract_user_iter()
  100  iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);    in netfs_extract_user_iter()
  101  return npages;    in netfs_extract_user_iter()
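netfs_extract_user_iter() keeps extracting user pages into the bvec array until either the byte count is consumed or max_pages slots are used, bumping npages by each batch's cur_npages. A toy model of the accumulation and capping logic (the batch size and page size are made up, and the real code fills each slot with bvec_set_page()):

```c
#include <stdio.h>

/* Model: extract pages in batches until the byte count is consumed
 * or the bvec array (max_pages slots) is full. */
static unsigned int extract_pages(unsigned long count, unsigned int max_pages,
				  unsigned int batch)
{
	unsigned int npages = 0;
	unsigned long page_size = 4096;

	while (count && npages < max_pages) {
		unsigned int want = max_pages - npages;
		unsigned int cur_npages = want < batch ? want : batch;
		unsigned long got = (unsigned long)cur_npages * page_size;

		if (got > count) {
			cur_npages = (count + page_size - 1) / page_size;
			got = count;
		}
		/* real code: bvec_set_page() for each extracted page */
		npages += cur_npages;
		count -= got;
	}
	return npages;
}

int main(void)
{
	printf("%u\n", extract_pages(10 * 4096 + 1, 8, 4));	/* capped at max_pages = 8 */
	return 0;
}
```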
|
| /linux/arch/powerpc/sysdev/ |
| H A D | dart_iommu.c |
  172  long npages, unsigned long uaddr,    in dart_build() argument
  180  DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);    in dart_build()
  187  l = npages;    in dart_build()
  195  dart_cache_sync(orig_dp, npages);    in dart_build()
  199  while (npages--)    in dart_build()
  208  static void dart_free(struct iommu_table *tbl, long index, long npages)    in dart_free() argument
  211  long orig_npages = npages;    in dart_free()
  218  DBG("dart: free at: %lx, %lx\n", index, npages);    in dart_free()
  222  while (npages--)    in dart_free()
|