Searched refs: npages (Results 1 – 25 of 137), sorted by relevance

/freebsd/contrib/netbsd-tests/sys/uvm/
t_uvm_physseg.c
281 uvmexp.npages = 0; in setup()
487 ATF_REQUIRE_EQ(0, uvmexp.npages); in ATF_TC_BODY()
492 ATF_REQUIRE_EQ(0, uvmexp.npages); in ATF_TC_BODY()
501 , uvmexp.npages); in ATF_TC_BODY()
510 + npages3, uvmexp.npages); in ATF_TC_BODY()
554 ATF_REQUIRE_EQ(0, uvmexp.npages); /* Boot time sanity */ in ATF_TC_BODY()
579 ATF_CHECK_EQ(atop(FIVEONETWO_KILO), uvmexp.npages); in ATF_TC_BODY()
648 ATF_REQUIRE_EQ(0, uvmexp.npages); in ATF_TC_BODY()
661 ATF_REQUIRE_EQ(0, uvmexp.npages); in ATF_TC_BODY()
692 ATF_REQUIRE_EQ(0, uvmexp.npages); in ATF_TC_BODY()
[all …]
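These NetBSD ATF tests assert that uvmexp.npages tracks exactly what has been plugged into the physical segment list, using atop() to convert byte counts to page counts (512 KiB yields 128 pages). A minimal sketch of that conversion, assuming 4 KiB pages; the kernel's PAGE_SHIFT is the authority:

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12                       /* assumed: 4 KiB pages */
    #define atop(x)    ((size_t)(x) >> PAGE_SHIFT)

    int main(void)
    {
            /* plugging 512 KiB should account for 128 pages */
            assert(atop(512 * 1024) == 128);
            return (0);
    }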
/freebsd/sys/dev/mlx5/mlx5_core/
mlx5_pagealloc.c
40 s32 npages; member
298 s32 *npages, int boot) in mlx5_cmd_query_pages() argument
313 *npages = MLX5_GET(query_pages_out, out, num_pages); in mlx5_cmd_query_pages()
319 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in give_pages() argument
329 inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); in give_pages()
337 for (i = 0; i < npages; i++) { in give_pages()
347 MLX5_SET(manage_pages_in, in, input_num_entries, npages); in give_pages()
352 func_id, npages, err); in give_pages()
355 dev->priv.fw_pages += npages; in give_pages()
356 dev->priv.pages_per_func[func_id] += npages; in give_pages()
[all …]
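give_pages() sizes its firmware command by adding one page-address slot per page (inlen += npages * entry size), fills the pas[] array, and on success bumps the per-function counters. A hedged sketch of that variable-length sizing; the struct here is illustrative, not the mlx5 wire layout:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct pages_cmd {                /* illustrative, not the mlx5 format */
            uint32_t func_id;
            uint32_t num_entries;
            uint64_t pas[];           /* one physical address per page */
    };

    static struct pages_cmd *
    build_give_pages(uint16_t func_id, const uint64_t *pas, int npages)
    {
            size_t inlen = sizeof(struct pages_cmd) +
                npages * sizeof(uint64_t);
            struct pages_cmd *in = calloc(1, inlen);

            if (in == NULL)
                    return (NULL);
            in->func_id = func_id;
            in->num_entries = npages;
            memcpy(in->pas, pas, npages * sizeof(uint64_t));
            return (in);
    }

    int main(void)
    {
            uint64_t pas[2] = { 0x1000, 0x2000 };
            struct pages_cmd *in = build_give_pages(7, pas, 2);
            int ok = (in != NULL && in->num_entries == 2);

            free(in);
            return (ok ? 0 : 1);
    }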
mlx5_alloc.c
77 buf->npages = howmany(size, PAGE_SIZE); in mlx5_buf_alloc()
81 buf->page_list = kcalloc(buf->npages, sizeof(*buf->page_list), in mlx5_buf_alloc()
91 PAGE_SIZE * buf->npages, /* maxsize */ in mlx5_buf_alloc()
92 buf->npages, /* nsegments */ in mlx5_buf_alloc()
111 PAGE_SIZE * buf->npages, &mlx5_buf_load_mem_cb, in mlx5_buf_alloc()
125 memset(buf->direct.buf, 0, PAGE_SIZE * buf->npages); in mlx5_buf_alloc()
251 for (i = 0; i != buf->npages; i++) in mlx5_fill_page_array()
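mlx5_buf_alloc() derives npages with howmany(), FreeBSD's round-up division macro, then allocates one page_list slot per page. The macro is plain ceil-divide; a sketch, assuming 4 KiB pages:

    #include <assert.h>

    #define PAGE_SIZE       4096    /* assumed */
    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

    int main(void)
    {
            assert(howmany(1, PAGE_SIZE) == 1);     /* partial page rounds up */
            assert(howmany(PAGE_SIZE, PAGE_SIZE) == 1);
            assert(howmany(PAGE_SIZE + 1, PAGE_SIZE) == 2);
            return (0);
    }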
/freebsd/sys/dev/drm2/ttm/
ttm_page_alloc.c
71 unsigned npages; member
326 static void ttm_pages_put(vm_page_t *pages, unsigned npages) in ttm_pages_put() argument
331 if (set_pages_array_wb(pages, npages)) in ttm_pages_put()
332 printf("[TTM] Failed to set %d pages to wb!\n", npages); in ttm_pages_put()
333 for (i = 0; i < npages; ++i) in ttm_pages_put()
340 pool->npages -= freed_pages; in ttm_pool_update_free_locked()
436 total += _manager->pools[i].npages; in ttm_pool_get_num_unused_pages()
617 && count > pool->npages) { in ttm_page_pool_fill_locked()
635 pool->npages += alloc_size; in ttm_page_pool_fill_locked()
643 pool->npages += cpages; in ttm_page_pool_fill_locked()
[all …]
/freebsd/sys/vm/
vm_phys.c
513 u_long npages; in vm_phys_init() local
525 npages = 0; in vm_phys_init()
542 npages > VM_DMA32_NPAGES_THRESHOLD && in vm_phys_init()
550 npages += atop(seg->end - seg->start); in vm_phys_init()
571 npages = 0; in vm_phys_init()
576 seg->first_page = &vm_page_array[npages]; in vm_phys_init()
577 npages += atop(seg->end - seg->start); in vm_phys_init()
715 int npages; in vm_phys_enq_chunk() local
717 npages = 1 << order; in vm_phys_enq_chunk()
718 m_next = m + npages; in vm_phys_enq_chunk()
[all …]
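vm_phys_init() totals pages per segment with atop(end - start), and vm_phys_enq_chunk() sizes a buddy chunk as 1 << order pages. A sketch of the segment accounting, with an assumed PAGE_SHIFT:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                           /* assumed */
    #define atop(x)    ((uint64_t)(x) >> PAGE_SHIFT)

    struct seg { uint64_t start, end; };            /* physical addresses */

    static uint64_t
    count_pages(const struct seg *segs, int nsegs)
    {
            uint64_t npages = 0;

            for (int i = 0; i < nsegs; i++)
                    npages += atop(segs[i].end - segs[i].start);
            return (npages);
    }

    int main(void)
    {
            struct seg segs[] = {{ 0x0, 0x100000 }, { 0x200000, 0x280000 }};

            /* 1 MiB + 512 KiB of RAM -> 256 + 128 pages */
            assert(count_pages(segs, 2) == 384);
            return (0);
    }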
vm_reserv.c
623 int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high, in vm_reserv_alloc_contig() argument
635 KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0")); in vm_reserv_alloc_contig()
641 pindex + npages > object->size) in vm_reserv_alloc_contig()
655 size = npages << PAGE_SHIFT; in vm_reserv_alloc_contig()
668 if (index + npages > VM_LEVEL_0_NPAGES) in vm_reserv_alloc_contig()
682 if (!bit_ntest(rv->popmap, index, index + npages - 1, 0)) in vm_reserv_alloc_contig()
684 if (!vm_domain_allocate(vmd, req, npages)) in vm_reserv_alloc_contig()
686 for (i = 0; i < npages; i++) in vm_reserv_alloc_contig()
705 minpages = VM_RESERV_INDEX(object, pindex) + npages; in vm_reserv_alloc_contig()
763 if (vm_domain_allocate(vmd, req, npages)) { in vm_reserv_alloc_contig()
[all …]
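vm_reserv_alloc_contig() looks for npages contiguous free slots inside a reservation by testing a run of clear bits in the population map: bit_ntest(rv->popmap, index, index + npages - 1, 0). The real predicate lives in sys/bitstring.h; re-implemented here only for illustration:

    #include <assert.h>
    #include <stdbool.h>

    /* true when bits [start, end] of the map all equal val */
    static bool
    bit_ntest(const unsigned char *map, int start, int end, int val)
    {
            for (int i = start; i <= end; i++)
                    if (((map[i / 8] >> (i % 8)) & 1) != val)
                            return (false);
            return (true);
    }

    int main(void)
    {
            unsigned char popmap[2] = { 0x10, 0x00 };  /* only bit 4 set */

            assert(!bit_ntest(popmap, 0, 7, 0));   /* run crosses bit 4 */
            assert(bit_ntest(popmap, 5, 12, 0));   /* free run after it */
            return (0);
    }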
vm_phys.h
62 vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low,
64 int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
67 void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
73 u_long npages, vm_paddr_t low, vm_paddr_t high);
74 void vm_phys_free_contig(vm_page_t m, u_long npages);
/freebsd/lib/libgeom/
geom_stats.c
48 static uint npages, spp; variable
58 if (munmap(statp, npages * pagesize) != 0) in geom_stats_close()
79 if (statp != NULL && munmap(statp, npages * pagesize) != 0) in geom_stats_resync()
85 npages = mediasize / pagesize; in geom_stats_resync()
98 npages = 1; in geom_stats_open()
122 sp->ptr = malloc(pagesize * npages); in geom_stats_snapshot_get()
127 explicit_bzero(sp->ptr, pagesize * npages); /* page in, cache */ in geom_stats_snapshot_get()
129 memcpy(sp->ptr, statp, pagesize * npages); in geom_stats_snapshot_get()
130 sp->pages = npages; in geom_stats_snapshot_get()
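libgeom maps the kernel's statistics area in whole pages: geom_stats_resync() recomputes npages from the device's mediasize, and every later munmap/memcpy uses pagesize * npages as the region length. A sketch of that sizing, with hypothetical names:

    #include <assert.h>
    #include <stddef.h>

    /* hypothetical: mapping length derived the way geom_stats_resync()
     * derives it, from a page-granular device's media size */
    static size_t
    stats_map_len(size_t mediasize, size_t pagesize)
    {
            size_t npages = mediasize / pagesize;

            return (npages * pagesize);
    }

    int main(void)
    {
            assert(stats_map_len(8 * 4096, 4096) == 32768);
            return (0);
    }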
/freebsd/sys/ofed/drivers/infiniband/core/
ib_umem.c
62 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { in __ib_umem_release()
93 unsigned long npages; in ib_umem_get() local
157 npages = ib_umem_num_pages(umem); in ib_umem_get()
161 locked = npages + current->mm->pinned_vm; in ib_umem_get()
165 if (npages == 0 || npages > UINT_MAX) { in ib_umem_get()
170 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); in ib_umem_get()
180 while (npages) { in ib_umem_get()
182 min_t(unsigned long, npages, in ib_umem_get()
189 umem->npages += ret; in ib_umem_get()
191 npages -= ret; in ib_umem_get()
[all …]
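ib_umem_get() charges the pinned pages to the process up front: it rejects zero or overflowing counts, then checks npages + pinned_vm against the memlock limit before building the scatterlist. A userspace sketch of that admission check, with hypothetical names:

    #include <assert.h>
    #include <limits.h>
    #include <stdbool.h>

    /* hypothetical mirror of the ib_umem_get() admission check */
    static bool
    may_pin(unsigned long npages, unsigned long pinned_vm,
        unsigned long lock_limit_pages)
    {
            if (npages == 0 || npages > UINT_MAX)
                    return (false);
            return (npages + pinned_vm <= lock_limit_pages);
    }

    int main(void)
    {
            /* 10 new pages, 5 already pinned, limit 16 -> allowed */
            assert(may_pin(10, 5, 16));
            assert(!may_pin(10, 5, 12));
            return (0);
    }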
ib_core_uverbs.c
198 if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) { in rdma_user_mmap_entry_get()
218 for (i = 0; i < entry->npages; i++) in rdma_user_mmap_entry_free()
292 u32 xa_first, xa_last, npages; in rdma_user_mmap_entry_insert_range() local
314 npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE); in rdma_user_mmap_entry_insert_range()
315 entry->npages = npages; in rdma_user_mmap_entry_insert_range()
318 for (i = min_pgoff, j = 0; (i + j) <= max_pgoff && j != npages; ) { in rdma_user_mmap_entry_insert_range()
331 if (j != npages) in rdma_user_mmap_entry_insert_range()
/freebsd/sys/kern/
kern_sendfile.c
92 int npages; member
327 for (i = 1; i < sfio->npages; i++) { in sendfile_iodone()
384 mb_free_notready(sfio->m, sfio->npages); in sendfile_iodone()
396 ktls_enqueue(sfio->m, so, sfio->npages); in sendfile_iodone()
400 (void)so->so_proto->pr_ready(so, sfio->m, sfio->npages); in sendfile_iodone()
418 int a, count, count1, grabbed, i, j, npages, rv; in sendfile_swapin() local
421 npages = sfio->npages; in sendfile_swapin()
431 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages); in sendfile_swapin()
432 if (grabbed < npages) { in sendfile_swapin()
433 for (int i = grabbed; i < npages; i++) in sendfile_swapin()
[all …]
kern_physio.c
48 int error, i, npages, maxpages; in physio() local
52 npages = 0; in physio()
152 if ((npages = vm_fault_quick_hold_pages( in physio()
162 pages, npages); in physio()
166 bp->bio_ma_n = npages; in physio()
182 pmap_qremove((vm_offset_t)sa, npages); in physio()
183 vm_page_unhold_pages(pages, npages); in physio()
/freebsd/sys/dev/mthca/
mthca_memfree.c
68 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
71 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
80 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
136 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
156 while (npages > 0) { in mthca_alloc_icm()
164 chunk->npages = 0; in mthca_alloc_icm()
169 while (1 << cur_order > npages) in mthca_alloc_icm()
174 &chunk->mem[chunk->npages], in mthca_alloc_icm()
177 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
181 ++chunk->npages; in mthca_alloc_icm()
[all …]
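mthca_alloc_icm() carves the request into power-of-two chunks, lowering cur_order whenever 1 << cur_order would overshoot the pages still needed (the full code also drops the order on allocation failure). A sketch of the order-stepping loop; MAX_ORDER stands in for the driver's starting order:

    #include <assert.h>

    #define MAX_ORDER 7     /* assumed starting order */

    static int
    count_chunks(int npages)
    {
            int cur_order = MAX_ORDER, chunks = 0;

            while (npages > 0) {
                    while (1 << cur_order > npages)
                            --cur_order;
                    npages -= 1 << cur_order;
                    ++chunks;
            }
            return (chunks);
    }

    int main(void)
    {
            assert(count_chunks(200) == 3);     /* 128 + 64 + 8 */
            return (0);
    }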
mthca_allocator.c
199 int npages, shift; in mthca_buf_alloc() local
206 npages = 1; in mthca_buf_alloc()
220 npages *= 2; in mthca_buf_alloc()
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
227 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list, in mthca_buf_alloc()
243 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
246 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
/freebsd/lib/libusbhid/
usage.c
54 static int npages, npagesmax; variable
62 for (i = 0; i < npages; i++) { in dump_hid_table()
125 if (npages >= npagesmax) { in hid_init()
139 curpage = &pages[npages++]; in hid_init()
166 for (k = 0; k < npages; k++) in hid_usage_page()
181 for (k = 0; k < npages; k++) in hid_usage_in_page()
184 if (k >= npages) in hid_usage_in_page()
210 for (k = 0; k < npages; k++) in hid_parse_usage_page()
228 for (k = 0; k < npages; k++) in hid_parse_usage_in_page()
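A false friend: in libusbhid, npages counts HID usage-page table entries parsed from the usage database, not memory pages. hid_init() grows the table whenever npages reaches npagesmax. A sketch of that grow-on-demand pattern; the doubling factor is an assumption:

    #include <stdlib.h>

    struct page { int id; };

    static struct page *pages;
    static int npages, npagesmax;

    /* hypothetical: return the next free slot, doubling capacity as needed */
    static struct page *
    next_page_slot(void)
    {
            if (npages >= npagesmax) {
                    int newmax = npagesmax ? npagesmax * 2 : 16;
                    struct page *np = realloc(pages, newmax * sizeof(*pages));

                    if (np == NULL)
                            return (NULL);
                    pages = np;
                    npagesmax = newmax;
            }
            return (&pages[npages++]);
    }

    int main(void)
    {
            struct page *p = next_page_slot();

            return (p != NULL && npages == 1 ? 0 : 1);
    }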
/freebsd/sys/dev/mlx4/mlx4_core/
mlx4_mr.c
200 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, in mlx4_mtt_init() argument
205 if (!npages) { in mlx4_mtt_init()
212 for (mtt->order = 0, i = 1; i < npages; i <<= 1) in mlx4_mtt_init()
423 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved() argument
433 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved()
533 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument
543 access, npages, page_shift, mr); in mlx4_mr_alloc()
595 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write() argument
600 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_rereg_mem_write()
698 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
[all …]
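mlx4_mtt_init() turns a page count into a buddy order by doubling until it covers npages, i.e. order = ceil(log2(npages)). A sketch of that loop:

    #include <assert.h>

    /* smallest order with 1 << order >= npages, for npages >= 1 */
    static int
    mtt_order(int npages)
    {
            int order = 0;

            for (int i = 1; i < npages; i <<= 1)
                    ++order;
            return (order);
    }

    int main(void)
    {
            assert(mtt_order(1) == 0);
            assert(mtt_order(5) == 3);      /* rounds up to 8 pages */
            assert(mtt_order(8) == 3);
            return (0);
    }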
mlx4_icm.c
60 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, in mlx4_free_icm_pages()
63 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
128 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, in mlx4_alloc_icm() argument
154 while (npages > 0) { in mlx4_alloc_icm()
169 chunk->npages = 0; in mlx4_alloc_icm()
174 while (1 << cur_order > npages) in mlx4_alloc_icm()
179 &chunk->mem[chunk->npages], in mlx4_alloc_icm()
182 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], in mlx4_alloc_icm()
193 ++chunk->npages; in mlx4_alloc_icm()
[all …]
/freebsd/sys/contrib/openzfs/module/os/freebsd/spl/
spl_uio.c
146 ASSERT3S(uio->uio_dio.npages, >, 0); in zfs_uio_set_pages_to_stable()
148 for (int i = 0; i < uio->uio_dio.npages; i++) { in zfs_uio_set_pages_to_stable()
162 for (int i = 0; i < uio->uio_dio.npages; i++) { in zfs_uio_release_stable_pages()
203 uio->uio_dio.npages); in zfs_uio_free_dio_pages()
206 uio->uio_dio.npages * sizeof (vm_page_t)); in zfs_uio_free_dio_pages()
237 zfs_uio_rw(uio), &uio->uio_dio.pages[uio->uio_dio.npages]); in zfs_uio_iov_step()
268 uio->uio_dio.npages += numpages; in zfs_uio_get_dio_pages_impl()
288 int npages = DIV_ROUND_UP(zfs_uio_resid(uio), PAGE_SIZE); in zfs_uio_get_dio_pages_alloc() local
289 size_t size = npages * sizeof (vm_page_t); in zfs_uio_get_dio_pages_alloc()
299 uio->uio_dio.npages); in zfs_uio_get_dio_pages_alloc()
[all …]
/freebsd/sys/dev/virtio/balloon/
virtio_balloon.c
332 vtballoon_inflate(struct vtballoon_softc *sc, int npages) in vtballoon_inflate() argument
340 if (npages > VTBALLOON_PAGES_PER_REQUEST) in vtballoon_inflate()
341 npages = VTBALLOON_PAGES_PER_REQUEST; in vtballoon_inflate()
343 for (i = 0; i < npages; i++) { in vtballoon_inflate()
362 vtballoon_deflate(struct vtballoon_softc *sc, int npages) in vtballoon_deflate() argument
372 if (npages > VTBALLOON_PAGES_PER_REQUEST) in vtballoon_deflate()
373 npages = VTBALLOON_PAGES_PER_REQUEST; in vtballoon_deflate()
375 for (i = 0; i < npages; i++) { in vtballoon_deflate()
406 int npages) in vtballoon_send_page_frames() argument
416 npages * sizeof(uint32_t)); in vtballoon_send_page_frames()
[all …]
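Both vtballoon_inflate() and vtballoon_deflate() clamp the request to VTBALLOON_PAGES_PER_REQUEST so the page-frame array handed to the host stays bounded at npages * sizeof(uint32_t). A sketch of the clamp; the constant's value here is assumed:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define VTBALLOON_PAGES_PER_REQUEST 256     /* assumed value */

    static size_t
    frames_len(int npages)
    {
            if (npages > VTBALLOON_PAGES_PER_REQUEST)
                    npages = VTBALLOON_PAGES_PER_REQUEST;
            return (npages * sizeof(uint32_t));
    }

    int main(void)
    {
            assert(frames_len(1000) == 256 * sizeof(uint32_t));
            return (0);
    }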
/freebsd/sys/arm/nvidia/drm2/
tegra_bo.c
59 pmap_qremove(bo->vbase, bo->npages); in tegra_bo_destruct()
62 for (i = 0; i < bo->npages; i++) { in tegra_bo_destruct()
93 tegra_bo_alloc_contig(size_t npages, u_long alignment, vm_memattr_t memattr, in tegra_bo_alloc_contig() argument
105 m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages, in tegra_bo_alloc_contig()
109 err = vm_page_reclaim_contig(0, npages, low, high, in tegra_bo_alloc_contig()
121 for (i = 0; i < npages; i++, m++) { in tegra_bo_alloc_contig()
144 for (i = 0; i < bo->npages; i++) { in tegra_bo_init_pager()
164 pmap_qenter(bo->vbase, bo->m, bo->npages); in tegra_bo_init_pager()
177 bo->npages = atop(size); in tegra_bo_alloc()
178 bo->m = malloc(sizeof(vm_page_t *) * bo->npages, DRM_MEM_DRIVER, in tegra_bo_alloc()
[all …]
/freebsd/contrib/mandoc/
dba_read.c
45 int32_t im, ip, iv, npages; in dba_read() local
49 npages = dbm_page_count(); in dba_read()
50 dba = dba_new(npages < 128 ? 128 : npages); in dba_read()
51 for (ip = 0; ip < npages; ip++) { in dba_read()
dbm.c
69 static int32_t npages; variable
94 if ((npages = be32toh(*dbm_getint(4))) < 0) { in dbm_open()
96 fname, npages); in dbm_open()
139 return npages; in dbm_page_count()
151 assert(ip < npages); in dbm_page_get()
265 ip = npages; in page_bytitle()
273 while (ip < npages) { in page_bytitle()
289 if (ip == npages) { in page_bytitle()
303 if (++ip < npages) { in page_bytitle()
331 for ( ; ip < npages; ip++) in page_byarch()
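dbm_open() reads npages straight from the mandoc.db header, byte-swapping with be32toh() and rejecting values that go negative once signed; dba_read() then preallocates at least 128 slots (dba_new(npages < 128 ? 128 : npages)). A sketch of that header validation, with an assumed layout:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>      /* ntohl(), standing in for be32toh() */

    /* hypothetical: pull a big-endian page count out of a db header,
     * refusing corrupt (negative once signed) values */
    static int32_t
    read_page_count(const unsigned char *hdr)
    {
            uint32_t raw;
            int32_t npages;

            memcpy(&raw, hdr, sizeof(raw));
            npages = (int32_t)ntohl(raw);
            return (npages < 0 ? -1 : npages);
    }

    int main(void)
    {
            unsigned char hdr[4] = { 0x00, 0x00, 0x00, 0x2a };

            return (read_page_count(hdr) == 42 ? 0 : 1);
    }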
/freebsd/sys/dev/mlx5/mlx5_ib/
mlx5_ib_mr.c
159 int npages = 1 << ent->order; in add_keys() local
190 MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2); in add_keys()
534 int npages; in get_octo_len() local
537 npages = ALIGN(len + offset, page_size) >> ilog2(page_size); in get_octo_len()
538 return (npages + 1) / 2; in get_octo_len()
547 int npages, int page_shift, int *size, in dma_map_mr_pas() argument
558 *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); in dma_map_mr_pas()
566 memset(pas + npages, 0, *size - npages * sizeof(u64)); in dma_map_mr_pas()
596 umrwr->npages = n; in prep_umr_wqe_common()
625 int access_flags, int *npages, in mr_umem_get() argument
[all …]
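get_octo_len() rounds the region up to whole pages, then packs two 8-byte translation entries per 16-byte octoword, hence (npages + 1) / 2. A sketch with an open-coded ilog2; page_size must be a power of two:

    #include <assert.h>
    #include <stdint.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    static int
    ilog2_u64(uint64_t v)
    {
            int r = 0;

            while (v >>= 1)
                    ++r;
            return (r);
    }

    /* octowords for a region's page translations, two entries each */
    static uint64_t
    get_octo_len(uint64_t len, uint64_t offset, uint64_t page_size)
    {
            uint64_t npages =
                ALIGN(len + offset, page_size) >> ilog2_u64(page_size);

            return ((npages + 1) / 2);
    }

    int main(void)
    {
            assert(get_octo_len(3 * 4096, 0, 4096) == 2);
            return (0);
    }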
/freebsd/sys/dev/gve/
gve_qpl.c
98 gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva) in gve_alloc_qpl() argument
104 if (npages + priv->num_registered_pages > priv->max_registered_pages) { in gve_alloc_qpl()
106 (uintmax_t)npages + priv->num_registered_pages, in gve_alloc_qpl()
115 qpl->dmas = malloc(npages * sizeof(*qpl->dmas), M_GVE_QPL, in gve_alloc_qpl()
118 qpl->pages = malloc(npages * sizeof(*qpl->pages), M_GVE_QPL, in gve_alloc_qpl()
123 qpl->kva = kva_alloc(PAGE_SIZE * npages); in gve_alloc_qpl()
131 for (i = 0; i < npages; i++) { in gve_alloc_qpl()
152 pmap_qenter(qpl->kva, qpl->pages, npages); in gve_alloc_qpl()
154 for (i = 0; i < npages; i++) { in gve_alloc_qpl()
/freebsd/sys/fs/smbfs/
smbfs_io.c
419 int i, error, nextoff, size, toff, npages, count; in smbfs_getpages()
444 npages = ap->a_count; in smbfs_getpages()
454 if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0) in smbfs_getpages()
464 pmap_qenter(kva, pages, npages); in smbfs_getpages()
466 VM_CNT_ADD(v_vnodepgsin, npages); in smbfs_getpages()
468 count = npages << PAGE_SHIFT; in smbfs_getpages()
481 pmap_qremove(kva, npages); in smbfs_getpages()
493 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { in smbfs_getpages()
558 int i, npages, count; in smbfs_putpages() local
573 npages = btoc(count); in smbfs_putpages()
[all …]
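smbfs_getpages() drops the trailing page from the request when it is already valid (a common VOP_GETPAGES idiom: the last page may have been partially filled earlier) and bails out if that leaves nothing to read; smbfs_putpages() converts bytes back to pages with btoc(). A sketch of the trailing trim, with a hypothetical validity predicate:

    #include <assert.h>
    #include <stdbool.h>

    /* hypothetical stand-in for !vm_page_none_valid() */
    static bool
    page_is_valid(const bool *valid, int i)
    {
            return (valid[i]);
    }

    /* drop the trailing page if already valid; 0 means nothing to do */
    static int
    trim_tail(const bool *valid, int npages)
    {
            if (npages > 0 && page_is_valid(valid, npages - 1))
                    --npages;
            return (npages);
    }

    int main(void)
    {
            bool valid[3] = { false, false, true };

            assert(trim_tail(valid, 3) == 2);
            return (0);
    }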
