
Searched refs:npages (Results 1 – 25 of 97) sorted by relevance


/illumos-gate/usr/src/uts/sun4/io/px/
px_fdvma.c
59 size_t npages, pg_index; in px_fdvma_load() local
63 npages = MMU_BTOPR(len + offset); in px_fdvma_load()
64 if (!npages) in px_fdvma_load()
69 if (index + npages > mp->dmai_ndvmapages) { in px_fdvma_load()
73 index, npages); in px_fdvma_load()
76 fdvma_p->pagecnt[index] = npages; in px_fdvma_load()
89 if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages, in px_fdvma_load()
105 size_t npages = fdvma_p->pagecnt[index]; in px_fdvma_unload() local
110 mp->dmai_mapping, MMU_PTOB(index), MMU_PTOB(npages)); in px_fdvma_unload()
112 px_mmu_unmap_pages(px_p->px_mmu_p, mp, dvma_pg, npages); in px_fdvma_unload()
[all …]
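
The px_fdvma.c hits above share a pattern: round the byte length plus its in-page offset up to a page count, bail out if the count is zero, and refuse the load if it would run past the pages reserved for the handle. A minimal, self-contained sketch of that arithmetic; the 8 KB page size, structure, and names below are illustrative stand-ins, not the px driver's MMU_BTOPR macro or dmai fields:

#include <stddef.h>

#define PAGE_SHIFT_SKETCH  13u                        /* 8 KB pages, as on sun4 */
#define PAGE_SIZE_SKETCH   (1ul << PAGE_SHIFT_SKETCH)
#define PAGE_OFFSET_SKETCH (PAGE_SIZE_SKETCH - 1ul)

/* Round a byte count up to whole pages (what an MMU_BTOPR-style macro does). */
static size_t
bytes_to_pages_roundup(size_t bytes)
{
    return ((bytes + PAGE_OFFSET_SKETCH) >> PAGE_SHIFT_SKETCH);
}

/* Hypothetical stand-in for the per-handle DVMA reservation. */
struct dvma_handle {
    size_t ndvmapages;          /* pages reserved for this handle */
};

/*
 * Validate a fast-DVMA load: the pages covering [offset, offset + len)
 * must fit between `index` and the end of the reservation.
 */
static int
dvma_load_check(const struct dvma_handle *hp, size_t index,
    size_t offset, size_t len)
{
    size_t npages = bytes_to_pages_roundup(len + offset);

    if (npages == 0)
        return (-1);            /* nothing to map */
    if (index + npages > hp->ndvmapages)
        return (-1);            /* would overrun the reservation */
    return (0);
}
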
px_mmu.c
178 size_t npages, size_t pfn_index) in px_mmu_map_pages() argument
184 ASSERT(npages <= mp->dmai_ndvmapages); in px_mmu_map_pages()
187 (uint_t)pg_index, dvma_pg, (uint_t)npages, (uint_t)pfn_index); in px_mmu_map_pages()
189 if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages, in px_mmu_map_pages()
202 pg_index + npages); in px_mmu_map_pages()
206 if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index + npages), 1, in px_mmu_map_pages()
208 pfn_index + npages - 1, MMU_MAP_PFN) != DDI_SUCCESS) { in px_mmu_map_pages()
212 if (px_lib_iommu_demap(dip, PCI_TSBID(0, pg_index), npages) in px_mmu_map_pages()
229 uint_t npages) in px_mmu_unmap_pages() argument
236 (uint_t)npages); in px_mmu_unmap_pages()
[all …]
px_dma.c
149 size_t npages = mp->dmai_ndvmapages; in px_dma_freepfn() local
150 if (npages > 1) in px_dma_freepfn()
151 kmem_free(addr, npages * sizeof (px_iopfn_t)); in px_dma_freepfn()
473 px_dma_pgpfn(px_t *px_p, ddi_dma_impl_t *mp, uint_t npages) in px_dma_pgpfn() argument
483 pplist, npages); in px_dma_pgpfn()
484 for (i = 1; i < npages; i++) { in px_dma_pgpfn()
496 for (i = 1; i < npages; i++, pp = pp->p_next) { in px_dma_pgpfn()
519 px_dma_vapfn(px_t *px_p, ddi_dma_impl_t *mp, uint_t npages) in px_dma_vapfn() argument
527 for (i = 1; i < npages; i++, vaddr += MMU_PAGE_SIZE) { in px_dma_vapfn()
554 uint32_t npages = mp->dmai_ndvmapages; in px_dma_pfn() local
[all …]
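
px_dma_freepfn() only frees the PFN list when npages > 1, a guard consistent with the common small-mapping optimization of storing a single PFN inline and allocating an array only for multi-page mappings. A hedged, userland-style sketch of that idiom; the union layout and names are illustrative, not the px driver's dmai fields:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint64_t iopfn_t;

/* Illustrative handle: one PFN stored inline, many PFNs via a heap array. */
struct pfn_list {
    size_t  npages;
    union {
        iopfn_t  inline_pfn;    /* used when npages == 1 */
        iopfn_t *array;         /* used when npages > 1  */
    } u;
};

static int
pfn_list_init(struct pfn_list *pl, const iopfn_t *pfns, size_t npages)
{
    pl->npages = npages;
    if (npages == 1) {
        pl->u.inline_pfn = pfns[0];
        return (0);
    }
    pl->u.array = malloc(npages * sizeof (iopfn_t));
    if (pl->u.array == NULL)
        return (-1);
    memcpy(pl->u.array, pfns, npages * sizeof (iopfn_t));
    return (0);
}

static void
pfn_list_fini(struct pfn_list *pl)
{
    /* Mirrors the npages > 1 guard seen in the free path above. */
    if (pl->npages > 1)
        free(pl->u.array);
    pl->npages = 0;
}
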
/illumos-gate/usr/src/uts/sun4u/io/pci/
pci_fdvma.c
60 size_t npages, pg_index; in pci_fdvma_load() local
66 npages = IOMMU_BTOPR(len + offset); in pci_fdvma_load()
67 if (!npages) in pci_fdvma_load()
72 if (index + npages > mp->dmai_ndvmapages) { in pci_fdvma_load()
76 index, npages); in pci_fdvma_load()
93 for (i = 0, a = baseaddr; i < npages; i++, a += IOMMU_PAGE_SIZE) { in pci_fdvma_load()
118 fdvma_p->pagecnt[index] = npages; in pci_fdvma_load()
184 size_t npages; in pci_fdvma_reserve() local
206 npages = dmareq->dmar_object.dmao_size; in pci_fdvma_reserve()
208 -npages) < 0) { in pci_fdvma_reserve()
[all …]
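
pci_fdvma_load() walks the request one IOMMU page at a time, advancing the address by the page size on each iteration. A small self-contained sketch of such a walk; the 8 KB page size and the visit callback are illustrative placeholders:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IOMMU_PAGE_SIZE_SKETCH 8192ul   /* illustrative 8 KB IOMMU page */

/* Call `visit` once per page of an npages-long run starting at baseaddr. */
static void
for_each_page(uintptr_t baseaddr, size_t npages,
    void (*visit)(uintptr_t page_addr, void *arg), void *arg)
{
    uintptr_t a;
    size_t i;

    for (i = 0, a = baseaddr; i < npages; i++, a += IOMMU_PAGE_SIZE_SKETCH)
        visit(a, arg);
}

static void
print_page(uintptr_t page_addr, void *arg)
{
    (void)arg;
    printf("page at 0x%lx\n", (unsigned long)page_addr);
}

int
main(void)
{
    for_each_page(0x10000000ul, 4, print_page, NULL);
    return (0);
}
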
pci_iommu.c
263 dvma_addr_t dvma_pg, size_t npages, size_t pfn_index) in iommu_map_pages() argument
268 size_t pfn_last = pfn_index + npages; in iommu_map_pages()
279 (uint_t)npages, (uint_t)pfn_index); in iommu_map_pages()
345 iommu_unmap_pages(iommu_t *iommu_p, dvma_addr_t dvma_pg, uint_t npages) in iommu_unmap_pages() argument
349 for (; npages; npages--, dvma_pg++, pg_index++) { in iommu_unmap_pages()
360 size_t npages, size_t pfn_index) in iommu_remap_pages() argument
362 iommu_unmap_pages(iommu_p, dvma_pg, npages); in iommu_remap_pages()
363 iommu_map_pages(iommu_p, mp, dvma_pg, npages, pfn_index); in iommu_remap_pages()
383 uint_t npages = IOMMU_BTOP(mp->dmai_winsize); in iommu_unmap_window() local
391 DEBUG2(DBG_UNMAP_WIN, dip, "mp=%p %x pfns:", mp, npages); in iommu_unmap_window()
[all …]
pci_dma.c
330 size_t npages = mp->dmai_ndvmapages; in pci_dma_freepfn() local
331 if (npages > 1) in pci_dma_freepfn()
332 kmem_free(addr, npages * sizeof (iopfn_t)); in pci_dma_freepfn()
652 pci_dma_pgpfn(pci_t *pci_p, ddi_dma_impl_t *mp, uint_t npages) in pci_dma_pgpfn() argument
663 pplist, npages); in pci_dma_pgpfn()
664 for (i = 1; i < npages; i++) { in pci_dma_pgpfn()
677 for (i = 1; i < npages; i++, pp = pp->p_next) { in pci_dma_pgpfn()
702 uint_t npages) in pci_dma_vapfn() argument
718 for (vaddr = sva, i = 1; i < npages; i++, vaddr += IOMMU_PAGE_SIZE) { in pci_dma_vapfn()
765 uint32_t npages = mp->dmai_ndvmapages; in pci_dma_pfn() local
[all …]
/illumos-gate/usr/src/lib/cfgadm_plugins/ac/common/
mema_test.c
136 long npages, pageno; in memory_test_normal() local
151 npages = BANK_SIZE(handle) / PAGE_SIZE(handle); in memory_test_normal()
161 for (pageno = 0; pageno < npages; pageno++) { in memory_test_normal()
168 if ((time(NULL) >= time_rep) || (pageno == npages - 1) || in memory_test_normal()
171 ((pageno + 1) * 100) / npages); in memory_test_normal()
176 for (pageno = npages-1; pageno >= 0; pageno--) { in memory_test_normal()
209 if ((time(NULL) >= time_rep) || (pageno == npages - 1) || in memory_test_normal()
212 ((npages - pageno) * 100) / npages); in memory_test_normal()
218 for (pageno = 0; pageno < npages; pageno++) { in memory_test_normal()
269 if ((time(NULL) >= time_rep) || (pageno == npages - 1) || in memory_test_normal()
[all …]
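
memory_test_normal() reports progress as an integer percentage, with the expression flipped for the reverse pass so both passes count up from roughly 0 to 100. A tiny sketch of just that arithmetic:

#include <stdio.h>

/* Percentage complete after finishing page `pageno` of a forward pass. */
static int
progress_forward(long pageno, long npages)
{
    return (int)(((pageno + 1) * 100) / npages);
}

/* Percentage complete when a reverse pass has worked down to page `pageno`. */
static int
progress_reverse(long pageno, long npages)
{
    return (int)(((npages - pageno) * 100) / npages);
}

int
main(void)
{
    long npages = 8;

    for (long p = 0; p < npages; p++)
        printf("fwd page %ld: %d%%\n", p, progress_forward(p, npages));
    for (long p = npages - 1; p >= 0; p--)
        printf("rev page %ld: %d%%\n", p, progress_reverse(p, npages));
    return (0);
}
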
/illumos-gate/usr/src/uts/i86pc/io/gfx_private/
gfxp_vm.c
89 pgcnt_t npages; in gfxp_map_kernel_space() local
116 npages = btopr(size + pgoffset); in gfxp_map_kernel_space()
117 cvaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP); in gfxp_map_kernel_space()
128 hat_devload(kas.a_hat, cvaddr, ptob(npages), pfn, in gfxp_map_kernel_space()
142 pgcnt_t npages; in gfxp_unmap_kernel_space() local
149 npages = btopr(size + pgoffset); in gfxp_unmap_kernel_space()
150 hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK); in gfxp_unmap_kernel_space()
151 vmem_free(heap_arena, base, ptob(npages)); in gfxp_unmap_kernel_space()
271 pgcnt_t npages; in gfxp_alloc_kernel_space() local
273 npages = btopr(size); in gfxp_alloc_kernel_space()
[all …]
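
gfxp_map_kernel_space() and gfxp_unmap_kernel_space() each derive npages the same way: add the base address's in-page offset to the size and round up with btopr(), so the unmap covers exactly the ptob(npages) bytes the map created. A minimal sketch of that shared arithmetic; the 4 KB page size and names are illustrative, not the gfx_private API:

#include <stddef.h>
#include <stdint.h>

#define PAGESHIFT_SKETCH  12u                   /* illustrative: 4 KB pages on i86pc */
#define PAGESIZE_SKETCH   (1ul << PAGESHIFT_SKETCH)
#define PAGEOFFSET_SKETCH (PAGESIZE_SKETCH - 1ul)

/* Page span covering [base, base + size), for an arbitrary base alignment. */
struct page_span {
    uintptr_t start;    /* page-aligned start address */
    size_t    npages;   /* number of whole pages covered */
};

static struct page_span
page_span_of(uintptr_t base, size_t size)
{
    struct page_span s;
    size_t pgoffset = base & PAGEOFFSET_SKETCH;

    s.start = base - pgoffset;
    /* btopr()-style round-up of (size + pgoffset) to whole pages. */
    s.npages = (size + pgoffset + PAGEOFFSET_SKETCH) >> PAGESHIFT_SKETCH;
    return (s);
}

/*
 * Both the map and the unmap side would call page_span_of() with the same
 * (base, size) pair, so the bytes mapped and later unmapped are identical.
 */
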
/illumos-gate/usr/src/uts/sun4u/io/
iommu.c
293 iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages) in iommu_tlb_flush() argument
300 if (npages == 1) { in iommu_tlb_flush()
306 hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE); in iommu_tlb_flush()
360 pgcnt_t npages; in iommu_remove_mappings() local
364 pgcnt_t npages = mp->dmai_ndvmapages; in iommu_remove_mappings() local
378 npages = mp->dmai_ndvmapages; in iommu_remove_mappings()
396 kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1)); in iommu_remove_mappings()
402 while (npages) { in iommu_remove_mappings()
408 npages--; in iommu_remove_mappings()
421 pgcnt_t npages; in iommu_create_vaddr_mappings() local
[all …]
iocache.c
160 sync_stream_buf(struct sbus_soft_state *softsp, ioaddr_t addr, uint_t npages, in sync_stream_buf() argument
173 "0x%x, sync flag 0x%p, sync flag pf 0x%lx\n", addr, npages, in sync_stream_buf()
176 ASSERT(npages > (uint_t)0); in sync_stream_buf()
183 if (npages > stream_buf_sync_using_diag) { in sync_stream_buf()
188 uint_t hiaddr = addr + (npages * IOMMU_PAGESIZE); in sync_stream_buf()
214 addr + (npages * IOMMU_PAGESIZE))); in sync_stream_buf()
231 npages--; in sync_stream_buf()
232 } while (npages > (uint_t)0); in sync_stream_buf()
/illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/
xhci_context.c
33 int i, npages; in xhci_scratchpad_fini() local
34 npages = xhcip->xhci_caps.xcap_max_scratch; in xhci_scratchpad_fini()
35 for (i = 0; i < npages; i++) { in xhci_scratchpad_fini()
39 sizeof (xhci_dma_buffer_t) * npages); in xhci_scratchpad_fini()
57 int npages, i; in xhci_scratchpad_alloc() local
66 npages = xhcip->xhci_caps.xcap_max_scratch; in xhci_scratchpad_alloc()
71 B_TRUE, sizeof (uint64_t) * npages, B_FALSE) == B_FALSE) { in xhci_scratchpad_alloc()
87 xsp->xsp_scratch_dma = kmem_zalloc(sizeof (xhci_dma_buffer_t) * npages, in xhci_scratchpad_alloc()
89 for (i = 0; i < npages; i++) { in xhci_scratchpad_alloc()
164 int i, npages; in xhci_context_init() local
[all …]
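
The xHCI scratchpad code sizes two allocations from the same npages value: a table of npages 64-bit entries for the hardware and an array of npages DMA-buffer descriptors, and the teardown path frees the descriptor array with the same npages-derived size. A hedged userland sketch of keeping those allocations in sync; the types here are stand-ins, not the xhci driver's:

#include <stdint.h>
#include <stdlib.h>

/* Stand-in for a per-page DMA buffer descriptor. */
typedef struct scratch_buf {
    void    *sb_va;
    uint64_t sb_pa;
} scratch_buf_t;

typedef struct scratchpad {
    int            sp_npages;
    uint64_t      *sp_table;    /* npages hardware-visible 64-bit entries */
    scratch_buf_t *sp_bufs;     /* npages per-page descriptors */
} scratchpad_t;

static int
scratchpad_alloc(scratchpad_t *sp, int npages)
{
    sp->sp_npages = npages;
    sp->sp_table = calloc(npages, sizeof (uint64_t));
    sp->sp_bufs = calloc(npages, sizeof (scratch_buf_t));
    if (sp->sp_table == NULL || sp->sp_bufs == NULL) {
        free(sp->sp_table);
        free(sp->sp_bufs);
        return (-1);
    }
    return (0);
}

static void
scratchpad_free(scratchpad_t *sp)
{
    /* Freed with the same npages-derived sizes used at allocation time. */
    free(sp->sp_table);
    free(sp->sp_bufs);
    sp->sp_npages = 0;
}
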
/illumos-gate/usr/src/uts/common/vm/
seg_spt.c
424 pgcnt_t npages; in segspt_shmincore() local
458 npages = btopr(len); in segspt_shmincore()
459 if (anon_index + npages > btopr(shmd->shm_amp->size)) { in segspt_shmincore()
463 for (i = 0; i < npages; i++, anon_index++) { in segspt_shmincore()
528 pgcnt_t npages = btopr(amp->size); in segspt_create() local
549 if (err = anon_swap_adjust(npages)) in segspt_create()
559 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), in segspt_create()
592 NULL, 0, ptob(npages)); in segspt_create()
611 more_pgs = new_npgs - npages; in segspt_create()
626 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages, in segspt_create()
[all …]
vm_anon.c
290 anon_create(pgcnt_t npages, int flags) in anon_create() argument
304 ahp->size = npages; in anon_create()
305 if (npages <= ANON_CHUNK_SIZE || (flags & ANON_ALLOC_FORCE)) { in anon_create()
329 ahp->size = P2ROUNDUP(npages, ANON_CHUNK_SIZE); in anon_create()
347 anon_release(struct anon_hdr *ahp, pgcnt_t npages) in anon_release() argument
353 ASSERT(npages <= ahp->size); in anon_release()
358 if (npages <= ANON_CHUNK_SIZE || (ahp->flags & ANON_ALLOC_FORCE)) { in anon_release()
515 ulong_t d_idx, pgcnt_t npages, int flags) in anon_copy_ptr() argument
522 ASSERT((npages <= sahp->size) && (npages <= dahp->size)); in anon_copy_ptr()
533 npages * sizeof (struct anon *)); in anon_copy_ptr()
[all …]
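
anon_create() keeps the anon array flat when npages fits within one chunk (or when forced) and otherwise rounds the size up to a whole number of chunks for a chunked layout. A hedged sketch of that sizing decision; the chunk size and structure are illustrative, not the kernel's anon_hdr:

#include <stddef.h>
#include <stdio.h>

#define CHUNK_SIZE_SKETCH 512ul     /* illustrative stand-in for ANON_CHUNK_SIZE */

/* Round x up to the next multiple of the power-of-two `align` (P2ROUNDUP). */
static size_t
p2roundup(size_t x, size_t align)
{
    return ((x + align - 1) & ~(align - 1));
}

struct anon_hdr_sketch {
    size_t size;        /* slots the header accounts for */
    int    chunked;     /* 0: flat array, 1: array of chunk pointers */
};

static void
anon_hdr_size(struct anon_hdr_sketch *ahp, size_t npages, int force_flat)
{
    if (npages <= CHUNK_SIZE_SKETCH || force_flat) {
        ahp->size = npages;
        ahp->chunked = 0;
    } else {
        ahp->size = p2roundup(npages, CHUNK_SIZE_SKETCH);
        ahp->chunked = 1;
    }
}

int
main(void)
{
    struct anon_hdr_sketch h;

    anon_hdr_size(&h, 1000, 0);
    printf("npages=1000 -> size=%zu, chunked=%d\n", h.size, h.chunked);
    return (0);
}
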
vm_seg.c
533 pgcnt_t npages = 0; in seg_pinactive() local
625 npages = btop(len); in seg_pinactive()
716 if (npages) { in seg_pinactive()
718 ASSERT(seg_plocked >= npages); in seg_pinactive()
719 seg_plocked -= npages; in seg_pinactive()
721 ASSERT(seg_plocked_window >= npages); in seg_pinactive()
722 seg_plocked_window -= npages; in seg_pinactive()
806 pgcnt_t npages; in seg_pinsert() local
829 npages = btop(len); in seg_pinsert()
832 if (seg_plocked_window + npages > seg_pmaxwindow) { in seg_pinsert()
[all …]
vm_page.c
528 page_free_large_ctr(pgcnt_t npages) in page_free_large_ctr() argument
533 freemem += npages; in page_free_large_ctr()
535 lump = roundup(npages, pcf_fanout) / pcf_fanout; in page_free_large_ctr()
537 while (npages > 0) { in page_free_large_ctr()
541 if (lump < npages) { in page_free_large_ctr()
543 npages -= lump; in page_free_large_ctr()
545 p->pcf_count += (uint_t)npages; in page_free_large_ctr()
546 npages = 0; in page_free_large_ctr()
555 ASSERT(npages == 0); in page_free_large_ctr()
1442 page_needfree(spgcnt_t npages) in page_needfree() argument
[all …]
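
page_free_large_ctr() spreads a large free-page count across the pcf counters rather than crediting a single one: the lump size is npages rounded up to a multiple of the fanout and divided by it, and lumps are handed out until the count is exhausted. A self-contained sketch of that distribution; the bucket count and names are illustrative:

#include <assert.h>
#include <stdio.h>

#define FANOUT 4                    /* illustrative stand-in for pcf_fanout */

static unsigned long counters[FANOUT];

/* Round `x` up to a multiple of `align` (what the kernel's roundup() does). */
static unsigned long
round_up(unsigned long x, unsigned long align)
{
    return (((x + align - 1) / align) * align);
}

static void
credit_large(unsigned long npages)
{
    unsigned long lump = round_up(npages, FANOUT) / FANOUT;
    int i = 0;

    while (npages > 0) {
        if (lump < npages) {
            counters[i] += lump;
            npages -= lump;
        } else {
            counters[i] += npages;  /* last bucket takes the tail */
            npages = 0;
        }
        i = (i + 1) % FANOUT;
    }
    assert(npages == 0);
}

int
main(void)
{
    credit_large(10);               /* lump = 3: buckets get 3, 3, 3, 1 */
    for (int i = 0; i < FANOUT; i++)
        printf("counter[%d] = %lu\n", i, counters[i]);
    return (0);
}
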
seg_kmem.c
453 pgcnt_t npages; in segkmem_fault() local
474 npages = btopr(size); in segkmem_fault()
478 for (pg = 0; pg < npages; pg++) { in segkmem_fault()
504 while (npages--) { in segkmem_fault()
697 pgcnt_t npages; in segkmem_pagelock() local
711 npages = btopr(len); in segkmem_pagelock()
712 nb = sizeof (page_t *) * npages; in segkmem_pagelock()
718 for (pg = 0; pg < npages; pg++) { in segkmem_pagelock()
734 for (pg = 0; pg < npages; pg++) { in segkmem_pagelock()
875 pgcnt_t npages = btopr(size); in segkmem_xalloc() local
[all …]
/illumos-gate/usr/src/uts/i86pc/os/
mp_implfuncs.c
211 pgcnt_t npages; in psm_map_phys_new() local
232 npages = mmu_btopr(len + pgoffset); in psm_map_phys_new()
233 cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP); in psm_map_phys_new()
236 hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base), in psm_map_phys_new()
246 pgcnt_t npages; in psm_unmap_phys() local
253 npages = mmu_btopr(len + pgoffset); in psm_unmap_phys()
254 hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK); in psm_unmap_phys()
255 device_arena_free(base, ptob(npages)); in psm_unmap_phys()
290 pgcnt_t npages; in psm_unmap() local
297 npages = mmu_btopr(len + pgoffset); in psm_unmap()
[all …]
/illumos-gate/usr/src/cmd/mandoc/
dba_read.c
45 int32_t im, ip, iv, npages; in dba_read() local
49 npages = dbm_page_count(); in dba_read()
50 dba = dba_new(npages < 128 ? 128 : npages); in dba_read()
51 for (ip = 0; ip < npages; ip++) { in dba_read()
dbm.c
69 static int32_t npages; variable
94 if ((npages = be32toh(*dbm_getint(4))) < 0) { in dbm_open()
96 fname, npages); in dbm_open()
139 return npages; in dbm_page_count()
151 assert(ip < npages); in dbm_page_get()
265 ip = npages; in page_bytitle()
273 while (ip < npages) { in page_bytitle()
289 if (ip == npages) { in page_bytitle()
303 if (++ip < npages) { in page_bytitle()
331 for ( ; ip < npages; ip++) in page_byarch()
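
In mandoc, dbm_open() reads the page count as a signed 32-bit big-endian value and rejects a negative result, and dba_read() then sizes its array with a floor of 128 entries. A small sketch of that validate-then-size step; the I/O is reduced to a 4-byte buffer, and the decoder mirrors what be32toh() does:

#include <stdint.h>
#include <stdio.h>

/* Decode a 32-bit big-endian integer (the be32toh() conversion in dbm_open()). */
static int32_t
read_be32(const unsigned char b[4])
{
    return (int32_t)(((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
        ((uint32_t)b[2] << 8) | (uint32_t)b[3]);
}

/* Validate the on-disk page count before using it for sizing. */
static int
page_count_from_header(const unsigned char header[4], int32_t *npagesp)
{
    int32_t npages = read_be32(header);

    if (npages < 0) {
        fprintf(stderr, "database: corrupt page count %d\n", npages);
        return (-1);
    }
    *npagesp = npages;
    return (0);
}

int
main(void)
{
    const unsigned char hdr[4] = { 0x00, 0x00, 0x00, 0x2a };    /* 42 pages */
    int32_t npages;

    if (page_count_from_header(hdr, &npages) != 0)
        return (1);

    /* dba_read() uses a floor of 128 so tiny databases still get some slack. */
    int32_t initial = npages < 128 ? 128 : npages;
    printf("npages=%d, initial array size=%d\n", npages, initial);
    return (0);
}
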
/illumos-gate/usr/src/uts/sun4v/io/
ldc_shm.c
358 pgcnt_t npages; in i_ldc_mem_bind_handle() local
469 npages = (len+v_offset)/pg_size; in i_ldc_mem_bind_handle()
470 npages = ((len+v_offset)%pg_size == 0) ? npages : npages+1; in i_ldc_mem_bind_handle()
474 ldcp->id, vaddr, v_align, v_offset, npages); in i_ldc_mem_bind_handle()
479 if (npages > mtbl->num_avail) { in i_ldc_mem_bind_handle()
492 memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP); in i_ldc_mem_bind_handle()
494 kmem_zalloc((sizeof (ldc_mem_cookie_t) * npages), KM_SLEEP); in i_ldc_mem_bind_handle()
497 ldcp->id, npages); in i_ldc_mem_bind_handle()
544 for (i = 0; i < npages; i++) { in i_ldc_mem_bind_handle()
588 } else if (i == (npages - 1)) { in i_ldc_mem_bind_handle()
[all …]
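
i_ldc_mem_bind_handle() computes npages with an explicit divide followed by a remainder check; that two-step form is equivalent to a single ceiling division. A tiny sketch demonstrating that the two forms agree:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* The two-step form used in the LDC code above. */
static size_t
npages_divmod(size_t len, size_t v_offset, size_t pg_size)
{
    size_t npages = (len + v_offset) / pg_size;
    return (((len + v_offset) % pg_size == 0) ? npages : npages + 1);
}

/* The equivalent single-expression ceiling division. */
static size_t
npages_ceil(size_t len, size_t v_offset, size_t pg_size)
{
    return ((len + v_offset + pg_size - 1) / pg_size);
}

int
main(void)
{
    size_t pg = 8192;

    for (size_t len = 0; len < 3 * pg; len += 511)
        for (size_t off = 0; off < pg; off += 1021)
            assert(npages_divmod(len, off, pg) ==
                npages_ceil(len, off, pg));
    printf("both forms agree\n");
    return (0);
}
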
/illumos-gate/usr/src/uts/sun4/vm/
sfmmu.c
756 calc_tsb_sizes(pgcnt_t npages) in calc_tsb_sizes() argument
758 PRM_DEBUG(npages); in calc_tsb_sizes()
760 if (npages <= TSB_FREEMEM_MIN) { in calc_tsb_sizes()
763 } else if (npages <= TSB_FREEMEM_LARGE / 2) { in calc_tsb_sizes()
766 } else if (npages <= TSB_FREEMEM_LARGE) { in calc_tsb_sizes()
769 } else if (npages <= TSB_FREEMEM_LARGE * 2 || in calc_tsb_sizes()
774 ktsb_szcode = highbit(npages - 1); in calc_tsb_sizes()
787 ktsb4m_szcode = highbit((2 * npages) / TTEPAGES(TTE4M) - 1); in calc_tsb_sizes()
806 ndata_alloc_tsbs(struct memlist *ndata, pgcnt_t npages) in ndata_alloc_tsbs() argument
817 calc_tsb_sizes(npages); in ndata_alloc_tsbs()
[all …]
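
calc_tsb_sizes() picks the kernel TSB size code by thresholds for small memory configurations and, for large ones, from the highest set bit of npages - 1, which yields a power-of-two size code no smaller than npages. A hedged sketch of that bit arithmetic with a locally defined highbit(); the helper is illustrative, not the kernel's:

#include <stdio.h>

/* Position of the highest set bit, counting from 1; 0 if no bits are set. */
static int
highbit_sketch(unsigned long v)
{
    int h = 0;

    while (v != 0) {
        h++;
        v >>= 1;
    }
    return (h);
}

/* Size code c such that (1UL << c) >= npages, for npages >= 1. */
static int
size_code_for(unsigned long npages)
{
    return (npages <= 1 ? 0 : highbit_sketch(npages - 1));
}

int
main(void)
{
    unsigned long samples[] = { 1, 7, 8, 9, 4096, 4097 };

    for (unsigned i = 0; i < sizeof (samples) / sizeof (samples[0]); i++) {
        int c = size_code_for(samples[i]);
        printf("npages=%lu -> size code %d (covers %lu)\n",
            samples[i], c, 1UL << c);
    }
    return (0);
}
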
/illumos-gate/usr/src/uts/common/io/mlxcx/
mlxcx_intr.c
435 mlxcx_give_pages_once(mlxcx_t *mlxp, size_t npages) in mlxcx_give_pages_once() argument
447 npages = MIN(npages, MLXCX_MANAGE_PAGES_MAX_PAGES); in mlxcx_give_pages_once()
449 pages = kmem_zalloc(sizeof (*pages) * npages, KM_SLEEP); in mlxcx_give_pages_once()
451 for (i = 0; i < npages; i++) { in mlxcx_give_pages_once()
458 npages); in mlxcx_give_pages_once()
470 MLXCX_MANAGE_PAGES_OPMOD_GIVE_PAGES, npages, pages)) { in mlxcx_give_pages_once()
472 "pages!", npages); in mlxcx_give_pages_once()
477 for (i = 0; i < npages; i++) { in mlxcx_give_pages_once()
480 mlxp->mlx_npages += npages; in mlxcx_give_pages_once()
483 kmem_free(pages, sizeof (*pages) * npages); in mlxcx_give_pages_once()
[all …]
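
mlxcx_give_pages_once() clamps each firmware command to MLXCX_MANAGE_PAGES_MAX_PAGES, allocates descriptors for exactly that many pages, and only adds to the running total once the command succeeds. A hedged sketch of driving such a batched interface from a larger request; the limit and the command stub are illustrative, not the mlxcx API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PAGES_PER_CMD 512       /* illustrative per-command limit */

static size_t total_pages_given;

/* Stub for "hand `count` pages to the device in one command". */
static bool
give_pages_once(size_t count)
{
    /* A real driver would allocate `count` descriptors and post a command. */
    total_pages_given += count;
    return (true);
}

/* Satisfy a large request with as many clamped batches as needed. */
static bool
give_pages(size_t npages)
{
    while (npages > 0) {
        size_t batch = npages < MAX_PAGES_PER_CMD ?
            npages : MAX_PAGES_PER_CMD;

        if (!give_pages_once(batch))
            return (false);
        npages -= batch;
    }
    return (true);
}

int
main(void)
{
    if (give_pages(1300))
        printf("gave %zu pages in total\n", total_pages_given);
    return (0);
}
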
/illumos-gate/usr/src/psm/stand/cpr/sparcv9/sun4u/
pages.c
280 int npages, compressed, regular; in cb_restore_kpages() local
288 npages = compressed = regular = 0; in cb_restore_kpages()
289 while (npages < sfile.kpages) { in cb_restore_kpages()
298 npages += desc.cpd_pages; in cb_restore_kpages()
326 str, sfile.kpages, npages, compressed, regular); in cb_restore_kpages()
331 if (npages != sfile.kpages) { in cb_restore_kpages()
333 str, sfile.kpages, npages); in cb_restore_kpages()
/illumos-gate/usr/src/uts/common/io/sfxge/common/
mcdi_mon.c
382 __out_ecount(npages) uint32_t *sensor_maskp, in efx_mcdi_sensor_info()
383 __in size_t npages) in efx_mcdi_sensor_info() argument
393 for (page = 0; page < npages; page++) { in efx_mcdi_sensor_info()
414 if ((page != (npages - 1)) && in efx_mcdi_sensor_info()
422 if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) { in efx_mcdi_sensor_info()
472 uint32_t npages; in mcdi_mon_cfg_build() local
497 npages = 0; in mcdi_mon_cfg_build()
498 if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0) in mcdi_mon_cfg_build()
501 encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE; in mcdi_mon_cfg_build()
502 encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t); in mcdi_mon_cfg_build()
[all …]
/illumos-gate/usr/src/uts/common/io/
ramdisk.c
309 rd_phys_alloc(pgcnt_t npages) in rd_phys_alloc() argument
317 if (rd_tot_physmem + npages > rd_max_physmem) in rd_phys_alloc()
320 if (!page_resv(npages, KM_NOSLEEP)) in rd_phys_alloc()
323 if (!page_create_wait(npages, 0)) { in rd_phys_alloc()
324 page_unresv(npages); in rd_phys_alloc()
328 ppalen = npages * sizeof (struct page_t *); in rd_phys_alloc()
331 page_create_putback(npages); in rd_phys_alloc()
332 page_unresv(npages); in rd_phys_alloc()
337 for (i = 0, addr = NULL; i < npages; ++i, addr += PAGESIZE) { in rd_phys_alloc()
352 for (i = 0; i < npages; i++) in rd_phys_alloc()
[all …]
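
rd_phys_alloc() stages its work: reserve npages, wait for them to be creatable, then allocate an npages-sized page-pointer array, undoing the earlier steps in reverse order when a later one fails. A self-contained sketch of that acquire-in-order, release-in-reverse shape; the reserve and wait calls are stand-ins for page_resv() and page_create_wait():

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Stand-ins for the kernel's staged reservation calls. */
static bool reserve_pages(size_t npages)   { (void)npages; return (true); }
static void unreserve_pages(size_t npages) { (void)npages; }
static bool wait_for_pages(size_t npages)  { (void)npages; return (true); }
static void put_back_pages(size_t npages)  { (void)npages; }

struct page;                    /* opaque, like page_t */

static struct page **
phys_alloc(size_t npages)
{
    struct page **ppa;

    if (!reserve_pages(npages))
        return (NULL);

    if (!wait_for_pages(npages)) {
        unreserve_pages(npages);        /* undo step 1 */
        return (NULL);
    }

    ppa = calloc(npages, sizeof (struct page *));
    if (ppa == NULL) {
        put_back_pages(npages);         /* undo step 2 */
        unreserve_pages(npages);        /* undo step 1 */
        return (NULL);
    }
    return (ppa);
}
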
