/titanic_44/usr/src/uts/sun4/io/px/

px_fdvma.c
     59  size_t npages, pg_index;  in px_fdvma_load() (local)
     63  npages = MMU_BTOPR(len + offset);  in px_fdvma_load()
     64  if (!npages)  in px_fdvma_load()
     69  if (index + npages > mp->dmai_ndvmapages) {  in px_fdvma_load()
     73  index, npages);  in px_fdvma_load()
     76  fdvma_p->pagecnt[index] = npages;  in px_fdvma_load()
     89  if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages,  in px_fdvma_load()
    105  size_t npages = fdvma_p->pagecnt[index];  in px_fdvma_unload() (local)
    110  mp->dmai_mapping, MMU_PTOB(index), MMU_PTOB(npages));  in px_fdvma_unload()
    112  px_mmu_unmap_pages(px_p->px_mmu_p, mp, dvma_pg, npages);  in px_fdvma_unload()
    [all …]
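
The sizing idiom in px_fdvma_load() above, npages = MMU_BTOPR(len + offset), recurs throughout these results (pci_fdvma_load() below uses IOMMU_BTOPR the same way): the buffer's sub-page offset is folded into the length before rounding up to whole pages. A minimal user-level sketch of why the offset matters; the macro values are stand-ins for the sun4 headers, not taken from them:

#include <stdio.h>
#include <stddef.h>

/*
 * Stand-ins for the kernel's MMU_PAGESIZE/MMU_BTOPR macros: round a
 * byte count up to a whole number of 8K MMU pages.
 */
#define MMU_PAGESIZE    8192
#define MMU_PAGEOFFSET  (MMU_PAGESIZE - 1)
#define MMU_BTOPR(x)    (((x) + MMU_PAGEOFFSET) / MMU_PAGESIZE)

int
main(void)
{
    size_t offset = 0x1f00;     /* buffer's offset within its first page */
    size_t len = 0x2200;        /* transfer length in bytes */

    /*
     * The offset must be folded in before rounding: a transfer that
     * starts near the end of a page spills into one more page than
     * len alone would suggest.
     */
    printf("pages from len alone: %zu\n", MMU_BTOPR(len));          /* 2 */
    printf("pages with offset:    %zu\n", MMU_BTOPR(len + offset)); /* 3 */
    return (0);
}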

px_mmu.c
    178  size_t npages, size_t pfn_index)  in px_mmu_map_pages() (argument)
    184  ASSERT(npages <= mp->dmai_ndvmapages);  in px_mmu_map_pages()
    187  (uint_t)pg_index, dvma_pg, (uint_t)npages, (uint_t)pfn_index);  in px_mmu_map_pages()
    189  if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages,  in px_mmu_map_pages()
    202  pg_index + npages);  in px_mmu_map_pages()
    206  if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index + npages), 1,  in px_mmu_map_pages()
    208  pfn_index + npages - 1, MMU_MAP_PFN) != DDI_SUCCESS) {  in px_mmu_map_pages()
    212  if (px_lib_iommu_demap(dip, PCI_TSBID(0, pg_index), npages)  in px_mmu_map_pages()
    229  uint_t npages)  in px_mmu_unmap_pages() (argument)
    236  (uint_t)npages);  in px_mmu_unmap_pages()
    [all …]

px_dma.c
    147  size_t npages = mp->dmai_ndvmapages;  in px_dma_freepfn() (local)
    148  if (npages > 1)  in px_dma_freepfn()
    149  kmem_free(addr, npages * sizeof (px_iopfn_t));  in px_dma_freepfn()
    471  px_dma_pgpfn(px_t *px_p, ddi_dma_impl_t *mp, uint_t npages)  in px_dma_pgpfn() (argument)
    481  pplist, npages);  in px_dma_pgpfn()
    482  for (i = 1; i < npages; i++) {  in px_dma_pgpfn()
    494  for (i = 1; i < npages; i++, pp = pp->p_next) {  in px_dma_pgpfn()
    517  px_dma_vapfn(px_t *px_p, ddi_dma_impl_t *mp, uint_t npages)  in px_dma_vapfn() (argument)
    525  for (i = 1; i < npages; i++, vaddr += MMU_PAGE_SIZE) {  in px_dma_vapfn()
    552  uint32_t npages = mp->dmai_ndvmapages;  in px_dma_pfn() (local)
    [all …]

/titanic_44/usr/src/uts/sun4u/io/pci/

pci_fdvma.c
     60  size_t npages, pg_index;  in pci_fdvma_load() (local)
     66  npages = IOMMU_BTOPR(len + offset);  in pci_fdvma_load()
     67  if (!npages)  in pci_fdvma_load()
     72  if (index + npages > mp->dmai_ndvmapages) {  in pci_fdvma_load()
     76  index, npages);  in pci_fdvma_load()
     93  for (i = 0, a = baseaddr; i < npages; i++, a += IOMMU_PAGE_SIZE) {  in pci_fdvma_load()
    118  fdvma_p->pagecnt[index] = npages;  in pci_fdvma_load()
    184  size_t npages;  in pci_fdvma_reserve() (local)
    206  npages = dmareq->dmar_object.dmao_size;  in pci_fdvma_reserve()
    208  -npages) < 0) {  in pci_fdvma_reserve()
    [all …]

pci_iommu.c
    263  dvma_addr_t dvma_pg, size_t npages, size_t pfn_index)  in iommu_map_pages() (argument)
    268  size_t pfn_last = pfn_index + npages;  in iommu_map_pages()
    279  (uint_t)npages, (uint_t)pfn_index);  in iommu_map_pages()
    345  iommu_unmap_pages(iommu_t *iommu_p, dvma_addr_t dvma_pg, uint_t npages)  in iommu_unmap_pages() (argument)
    349  for (; npages; npages--, dvma_pg++, pg_index++) {  in iommu_unmap_pages()
    360  size_t npages, size_t pfn_index)  in iommu_remap_pages() (argument)
    362  iommu_unmap_pages(iommu_p, dvma_pg, npages);  in iommu_remap_pages()
    363  iommu_map_pages(iommu_p, mp, dvma_pg, npages, pfn_index);  in iommu_remap_pages()
    383  uint_t npages = IOMMU_BTOP(mp->dmai_winsize);  in iommu_unmap_window() (local)
    391  DEBUG2(DBG_UNMAP_WIN, dip, "mp=%p %x pfns:", mp, npages);  in iommu_unmap_window()
    [all …]
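
iommu_remap_pages() above is literally unmap-then-map over the same DVMA range. A toy model of that composition, with the TSB reduced to a plain array of TTEs; every name and constant here is illustrative, not the driver's real types:

#include <stdio.h>
#include <stdint.h>

/* Toy TSB: an array of TTEs indexed by DVMA page number. */
#define TSB_ENTRIES 16
#define TTE_VALID   (1ULL << 63)

static uint64_t tsb[TSB_ENTRIES];

static void
map_pages(const uint64_t *pfns, unsigned dvma_pg, unsigned npages,
    unsigned pfn_index)
{
    for (unsigned i = 0; i < npages; i++)
        tsb[dvma_pg + i] = TTE_VALID | pfns[pfn_index + i];
}

static void
unmap_pages(unsigned dvma_pg, unsigned npages)
{
    /* Same shape as iommu_unmap_pages(): clear one entry per page. */
    for (; npages; npages--, dvma_pg++)
        tsb[dvma_pg] = 0;
}

static void
remap_pages(const uint64_t *pfns, unsigned dvma_pg, unsigned npages,
    unsigned pfn_index)
{
    /* remap == unmap + map over the same range, as in iommu_remap_pages(). */
    unmap_pages(dvma_pg, npages);
    map_pages(pfns, dvma_pg, npages, pfn_index);
}

int
main(void)
{
    uint64_t pfns[6] = { 100, 101, 102, 200, 201, 202 };

    map_pages(pfns, 4, 3, 0);   /* dvma pages 4..6 -> pfns 100..102 */
    remap_pages(pfns, 4, 3, 3); /* dvma pages 4..6 -> pfns 200..202 */
    for (unsigned i = 4; i < 7; i++)
        printf("tsb[%u] = %#llx\n", i, (unsigned long long)tsb[i]);
    unmap_pages(4, 3);
    return (0);
}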

pci_dma.c
    328  size_t npages = mp->dmai_ndvmapages;  in pci_dma_freepfn() (local)
    329  if (npages > 1)  in pci_dma_freepfn()
    330  kmem_free(addr, npages * sizeof (iopfn_t));  in pci_dma_freepfn()
    650  pci_dma_pgpfn(pci_t *pci_p, ddi_dma_impl_t *mp, uint_t npages)  in pci_dma_pgpfn() (argument)
    661  pplist, npages);  in pci_dma_pgpfn()
    662  for (i = 1; i < npages; i++) {  in pci_dma_pgpfn()
    675  for (i = 1; i < npages; i++, pp = pp->p_next) {  in pci_dma_pgpfn()
    700  uint_t npages)  in pci_dma_vapfn() (argument)
    716  for (vaddr = sva, i = 1; i < npages; i++, vaddr += IOMMU_PAGE_SIZE) {  in pci_dma_vapfn()
    763  uint32_t npages = mp->dmai_ndvmapages;  in pci_dma_pfn() (local)
    [all …]

/titanic_44/usr/src/lib/cfgadm_plugins/ac/common/

mema_test.c
    138  long npages, pageno;  in memory_test_normal() (local)
    153  npages = BANK_SIZE(handle) / PAGE_SIZE(handle);  in memory_test_normal()
    163  for (pageno = 0; pageno < npages; pageno++) {  in memory_test_normal()
    170  if ((time(NULL) >= time_rep) || (pageno == npages - 1) ||  in memory_test_normal()
    173  ((pageno + 1) * 100) / npages);  in memory_test_normal()
    178  for (pageno = npages-1; pageno >= 0; pageno--) {  in memory_test_normal()
    211  if ((time(NULL) >= time_rep) || (pageno == npages - 1) ||  in memory_test_normal()
    214  ((npages - pageno) * 100) / npages);  in memory_test_normal()
    220  for (pageno = 0; pageno < npages; pageno++) {  in memory_test_normal()
    271  if ((time(NULL) >= time_rep) || (pageno == npages - 1) ||  in memory_test_normal()
    [all …]
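
The test loops in memory_test_normal() rate-limit their progress reports: a report fires when the time budget expires or on the final page, and the percentage is ((pageno + 1) * 100) / npages (mirrored for the descending pass). A stripped-down sketch of that pattern, with made-up page counts and reporting details:

#include <stdio.h>
#include <time.h>

int
main(void)
{
    long npages = 4000000;      /* pages in the bank under test; arbitrary */
    time_t interval = 1;        /* seconds between reports */
    time_t time_rep = time(NULL) + interval;

    for (long pageno = 0; pageno < npages; pageno++) {
        /* ... write/verify one page here ... */
        if ((time(NULL) >= time_rep) || (pageno == npages - 1)) {
            /* the last-page test guarantees a final 100% report */
            printf("\rpass: %ld%%", ((pageno + 1) * 100) / npages);
            fflush(stdout);
            time_rep = time(NULL) + interval;
        }
    }
    (void) putchar('\n');
    return (0);
}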

/titanic_44/usr/src/uts/sun4u/io/

iommu.c
    293  iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)  in iommu_tlb_flush() (argument)
    300  if (npages == 1) {  in iommu_tlb_flush()
    306  hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);  in iommu_tlb_flush()
    360  pgcnt_t npages;  in iommu_remove_mappings() (local)
    364  pgcnt_t npages = mp->dmai_ndvmapages;  in iommu_remove_mappings() (local)
    378  npages = mp->dmai_ndvmapages;  in iommu_remove_mappings()
    396  kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));  in iommu_remove_mappings()
    402  while (npages) {  in iommu_remove_mappings()
    408  npages--;  in iommu_remove_mappings()
    421  pgcnt_t npages;  in iommu_create_vaddr_mappings() (local)
    [all …]

iocache.c
    160  sync_stream_buf(struct sbus_soft_state *softsp, ioaddr_t addr, uint_t npages,  in sync_stream_buf() (argument)
    173  "0x%x, sync flag 0x%p, sync flag pf 0x%lx\n", addr, npages,  in sync_stream_buf()
    176  ASSERT(npages > (uint_t)0);  in sync_stream_buf()
    183  if (npages > stream_buf_sync_using_diag) {  in sync_stream_buf()
    188  uint_t hiaddr = addr + (npages * IOMMU_PAGESIZE);  in sync_stream_buf()
    214  addr + (npages * IOMMU_PAGESIZE)));  in sync_stream_buf()
    231  npages--;  in sync_stream_buf()
    232  } while (npages > (uint_t)0);  in sync_stream_buf()

/titanic_44/usr/src/uts/common/vm/

seg_spt.c
    277  pgcnt_t npages;  in segspt_shmincore() (local)
    311  npages = btopr(len);  in segspt_shmincore()
    312  if (anon_index + npages > btopr(shmd->shm_amp->size)) {  in segspt_shmincore()
    316  for (i = 0; i < npages; i++, anon_index++) {  in segspt_shmincore()
    380  pgcnt_t npages = btopr(amp->size);  in segspt_create() (local)
    405  if (err = anon_swap_adjust(npages))  in segspt_create()
    414  if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),  in segspt_create()
    447  NULL, 0, ptob(npages));  in segspt_create()
    466  more_pgs = new_npgs - npages;  in segspt_create()
    481  (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,  in segspt_create()
    [all …]

vm_anon.c
    291  anon_create(pgcnt_t npages, int flags)  in anon_create() (argument)
    305  ahp->size = npages;  in anon_create()
    306  if (npages <= ANON_CHUNK_SIZE || (flags & ANON_ALLOC_FORCE)) {  in anon_create()
    330  ahp->size = P2ROUNDUP(npages, ANON_CHUNK_SIZE);  in anon_create()
    348  anon_release(struct anon_hdr *ahp, pgcnt_t npages)  in anon_release() (argument)
    354  ASSERT(npages <= ahp->size);  in anon_release()
    359  if (npages <= ANON_CHUNK_SIZE || (ahp->flags & ANON_ALLOC_FORCE)) {  in anon_release()
    517  pgcnt_t npages, int flags)  in anon_copy_ptr() (argument)
    524  ASSERT((npages <= sahp->size) && (npages <= dahp->size));  in anon_copy_ptr()
    535  npages * sizeof (struct anon *));  in anon_copy_ptr()
    [all …]
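
anon_create() picks between a flat slot array and a chunked two-level one, rounding the requested size up to whole chunks with P2ROUNDUP in the latter case. A small sketch of that sizing decision; the chunk size is a stand-in, and the macro mirrors the usual illumos power-of-two form:

#include <stdio.h>

#define ANON_CHUNK_SIZE     512UL                   /* slots per chunk; illustrative */
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))    /* align must be a power of 2 */

int
main(void)
{
    unsigned long npages = 1300;

    if (npages <= ANON_CHUNK_SIZE) {
        printf("flat array: %lu slots\n", npages);
    } else {
        unsigned long size = P2ROUNDUP(npages, ANON_CHUNK_SIZE);
        printf("chunked: %lu slots in %lu chunks\n",
            size, size / ANON_CHUNK_SIZE);          /* 1536 slots, 3 chunks */
    }
    return (0);
}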

vm_seg.c
    533  pgcnt_t npages = 0;  in seg_pinactive() (local)
    625  npages = btop(len);  in seg_pinactive()
    716  if (npages) {  in seg_pinactive()
    718  ASSERT(seg_plocked >= npages);  in seg_pinactive()
    719  seg_plocked -= npages;  in seg_pinactive()
    721  ASSERT(seg_plocked_window >= npages);  in seg_pinactive()
    722  seg_plocked_window -= npages;  in seg_pinactive()
    806  pgcnt_t npages;  in seg_pinsert() (local)
    829  npages = btop(len);  in seg_pinsert()
    832  if (seg_plocked_window + npages > seg_pmaxwindow) {  in seg_pinsert()
    [all …]

vm_page.c
    518  page_free_large_ctr(pgcnt_t npages)  in page_free_large_ctr() (argument)
    523  freemem += npages;  in page_free_large_ctr()
    525  lump = roundup(npages, pcf_fanout) / pcf_fanout;  in page_free_large_ctr()
    527  while (npages > 0) {  in page_free_large_ctr()
    531  if (lump < npages) {  in page_free_large_ctr()
    533  npages -= lump;  in page_free_large_ctr()
    535  p->pcf_count += (uint_t)npages;  in page_free_large_ctr()
    536  npages = 0;  in page_free_large_ctr()
    545  ASSERT(npages == 0);  in page_free_large_ctr()
   1434  page_needfree(spgcnt_t npages)  in page_needfree() (argument)
    [all …]
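
page_free_large_ctr() spreads a large freed-page count over the pcf[] fanout counters in roughly equal lumps, lump = roundup(npages, pcf_fanout) / pcf_fanout, so no single counter (and its lock) absorbs the whole batch. A user-level sketch of the distribution, with a hypothetical fanout of 4:

#include <stdio.h>

#define PCF_FANOUT      4                               /* hypothetical fanout */
#define ROUNDUP(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
    unsigned long npages = 10;                          /* pages being freed */
    unsigned long pcf_count[PCF_FANOUT] = { 0 };
    unsigned long lump = ROUNDUP(npages, PCF_FANOUT) / PCF_FANOUT;

    for (int i = 0; npages > 0; i = (i + 1) % PCF_FANOUT) {
        if (lump < npages) {
            pcf_count[i] += lump;
            npages -= lump;
        } else {
            pcf_count[i] += npages;                     /* last, short lump */
            npages = 0;
        }
    }
    for (int i = 0; i < PCF_FANOUT; i++)
        printf("pcf[%d] = %lu\n", i, pcf_count[i]);     /* 3 3 3 1 */
    return (0);
}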

seg_kmem.c
    445  pgcnt_t npages;  in segkmem_fault() (local)
    466  npages = btopr(size);  in segkmem_fault()
    470  for (pg = 0; pg < npages; pg++) {  in segkmem_fault()
    496  while (npages--) {  in segkmem_fault()
    683  pgcnt_t npages;  in segkmem_pagelock() (local)
    697  npages = btopr(len);  in segkmem_pagelock()
    698  nb = sizeof (page_t *) * npages;  in segkmem_pagelock()
    704  for (pg = 0; pg < npages; pg++) {  in segkmem_pagelock()
    720  for (pg = 0; pg < npages; pg++) {  in segkmem_pagelock()
    865  pgcnt_t npages = btopr(size);  in segkmem_xalloc() (local)
    [all …]

/titanic_44/usr/src/uts/i86pc/io/gfx_private/

gfxp_vm.c
     90  pgcnt_t npages;  in gfxp_map_kernel_space() (local)
    117  npages = btopr(size + pgoffset);  in gfxp_map_kernel_space()
    118  cvaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);  in gfxp_map_kernel_space()
    129  hat_devload(kas.a_hat, cvaddr, ptob(npages), pfn,  in gfxp_map_kernel_space()
    143  pgcnt_t npages;  in gfxp_unmap_kernel_space() (local)
    150  npages = btopr(size + pgoffset);  in gfxp_unmap_kernel_space()
    151  hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);  in gfxp_unmap_kernel_space()
    152  vmem_free(heap_arena, base, ptob(npages));  in gfxp_unmap_kernel_space()

/titanic_44/usr/src/uts/i86pc/os/

mp_implfuncs.c
    209  pgcnt_t npages;  in psm_map_phys_new() (local)
    230  npages = mmu_btopr(len + pgoffset);  in psm_map_phys_new()
    231  cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);  in psm_map_phys_new()
    234  hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),  in psm_map_phys_new()
    244  pgcnt_t npages;  in psm_unmap_phys() (local)
    251  npages = mmu_btopr(len + pgoffset);  in psm_unmap_phys()
    252  hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);  in psm_unmap_phys()
    253  device_arena_free(base, ptob(npages));  in psm_unmap_phys()
    288  pgcnt_t npages;  in psm_unmap() (local)
    295  npages = mmu_btopr(len + pgoffset);  in psm_unmap()
    [all …]
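
gfxp_map_kernel_space() above and psm_map_phys_new() share the same front-end arithmetic: split the physical address into a page-aligned base plus an in-page offset, then round the length up so the mapping covers both ragged ends. A sketch of just that arithmetic; the 4K page size is illustrative, and the vmem_alloc()/hat_devload() steps that follow it in the kernel appear only as comments:

#include <stdio.h>
#include <stdint.h>

#define MMU_PAGESIZE    4096
#define MMU_PAGEOFFSET  (MMU_PAGESIZE - 1)
#define MMU_PAGEMASK    (~(uintptr_t)MMU_PAGEOFFSET)
#define MMU_BTOPR(x)    (((x) + MMU_PAGEOFFSET) / MMU_PAGESIZE)

int
main(void)
{
    uintptr_t base = 0xfd000a80;    /* unaligned device address; arbitrary */
    size_t len = 0x3000;

    uintptr_t pgoffset = base & MMU_PAGEOFFSET;
    uintptr_t pgbase = base & MMU_PAGEMASK;
    size_t npages = MMU_BTOPR(len + pgoffset);

    /*
     * The kernel would now allocate npages of kernel VA (vmem_alloc or
     * device_arena_alloc), hat_devload() them at pgbase, and hand the
     * caller the mapped VA plus pgoffset.
     */
    printf("map %zu pages at %#lx, return mapping + %#lx\n",
        npages, (unsigned long)pgbase, (unsigned long)pgoffset);
    return (0);
}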

pmem.c
    290  pgcnt_t npages = btopr(size);  in devmap_pmem_alloc() (local)
    303  if (npages == 0 || npages >= total_pages / 2)  in devmap_pmem_alloc()
    311  if (pmem_cookie_alloc(&pcp, npages, kflags) == DDI_FAILURE)  in devmap_pmem_alloc()
    313  pcp->dp_npages = npages;  in devmap_pmem_alloc()
    319  if (pmem_lock(npages, curproc) == DDI_FAILURE)  in devmap_pmem_alloc()
    327  tpages = mpool_break(&tlist, npages);  in devmap_pmem_alloc()
    333  if (tpages == npages)  in devmap_pmem_alloc()
    336  rpages = npages - tpages;  in devmap_pmem_alloc()
    379  for (pp = tlist, i = 0; i < npages; i++) {  in devmap_pmem_alloc()
    409  kmem_free(pcp->dp_pparray, npages * sizeof (page_t *));  in devmap_pmem_alloc()
    [all …]

/titanic_44/usr/src/uts/sun4u/starfire/os/

pda.c
    139  pgcnt_t npages;  in pda_get_mem_size() (local)
    160  npages = 0;  in pda_get_mem_size()
    178  npages += c_endpfn - c_basepfn;  in pda_get_mem_size()
    181  return (npages);  in pda_get_mem_size()
    225  pgcnt_t d_npgs, npages;  in pda_mem_del_span() (local)
    234  npages = 0;  in pda_mem_del_span()
    246  npages += p_npgs;  in pda_mem_del_span()
    270  npages += p_npgs;  in pda_mem_del_span()
    281  npages += endp->Memc_Size;  in pda_mem_del_span()
    292  npages += p_npgs;  in pda_mem_del_span()
    [all …]

/titanic_44/usr/src/uts/sun4/vm/

sfmmu.c
    756  calc_tsb_sizes(pgcnt_t npages)  in calc_tsb_sizes() (argument)
    758  PRM_DEBUG(npages);  in calc_tsb_sizes()
    760  if (npages <= TSB_FREEMEM_MIN) {  in calc_tsb_sizes()
    763  } else if (npages <= TSB_FREEMEM_LARGE / 2) {  in calc_tsb_sizes()
    766  } else if (npages <= TSB_FREEMEM_LARGE) {  in calc_tsb_sizes()
    769  } else if (npages <= TSB_FREEMEM_LARGE * 2 ||  in calc_tsb_sizes()
    774  ktsb_szcode = highbit(npages - 1);  in calc_tsb_sizes()
    787  ktsb4m_szcode = highbit((2 * npages) / TTEPAGES(TTE4M) - 1);  in calc_tsb_sizes()
    806  ndata_alloc_tsbs(struct memlist *ndata, pgcnt_t npages)  in ndata_alloc_tsbs() (argument)
    817  calc_tsb_sizes(npages);  in ndata_alloc_tsbs()
    [all …]

/titanic_44/usr/src/uts/sun4v/io/

ldc_shm.c
    358  pgcnt_t npages;  in i_ldc_mem_bind_handle() (local)
    469  npages = (len+v_offset)/pg_size;  in i_ldc_mem_bind_handle()
    470  npages = ((len+v_offset)%pg_size == 0) ? npages : npages+1;  in i_ldc_mem_bind_handle()
    474  ldcp->id, vaddr, v_align, v_offset, npages);  in i_ldc_mem_bind_handle()
    479  if (npages > mtbl->num_avail) {  in i_ldc_mem_bind_handle()
    492  memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);  in i_ldc_mem_bind_handle()
    494  kmem_zalloc((sizeof (ldc_mem_cookie_t) * npages), KM_SLEEP);  in i_ldc_mem_bind_handle()
    497  ldcp->id, npages);  in i_ldc_mem_bind_handle()
    544  for (i = 0; i < npages; i++) {  in i_ldc_mem_bind_handle()
    588  } else if (i == (npages - 1)) {  in i_ldc_mem_bind_handle()
    [all …]
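
i_ldc_mem_bind_handle() computes its page count with an explicit divide followed by a remainder test. That is just a ceiling division, and the sketch below checks it against the usual one-liner (the values are arbitrary):

#include <stdio.h>

int
main(void)
{
    unsigned long pg_size = 8192;
    unsigned long len = 20000, v_offset = 700;
    unsigned long x = len + v_offset;

    /* the driver's two-step form ... */
    unsigned long npages = x / pg_size;
    npages = (x % pg_size == 0) ? npages : npages + 1;

    /* ... equals the usual ceiling division */
    printf("%lu == %lu\n", npages, (x + pg_size - 1) / pg_size);
    return (0);
}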

/titanic_44/usr/src/psm/stand/cpr/sparcv9/sun4u/

pages.c
    280  int npages, compressed, regular;  in cb_restore_kpages() (local)
    288  npages = compressed = regular = 0;  in cb_restore_kpages()
    289  while (npages < sfile.kpages) {  in cb_restore_kpages()
    298  npages += desc.cpd_pages;  in cb_restore_kpages()
    326  str, sfile.kpages, npages, compressed, regular);  in cb_restore_kpages()
    331  if (npages != sfile.kpages) {  in cb_restore_kpages()
    333  str, sfile.kpages, npages);  in cb_restore_kpages()

/titanic_44/usr/src/uts/common/io/

ramdisk.c
    309  rd_phys_alloc(pgcnt_t npages)  in rd_phys_alloc() (argument)
    317  if (rd_tot_physmem + npages > rd_max_physmem)  in rd_phys_alloc()
    320  if (!page_resv(npages, KM_NOSLEEP))  in rd_phys_alloc()
    323  if (!page_create_wait(npages, 0)) {  in rd_phys_alloc()
    324  page_unresv(npages);  in rd_phys_alloc()
    328  ppalen = npages * sizeof (struct page_t *);  in rd_phys_alloc()
    331  page_create_putback(npages);  in rd_phys_alloc()
    332  page_unresv(npages);  in rd_phys_alloc()
    337  for (i = 0, addr = NULL; i < npages; ++i, addr += PAGESIZE) {  in rd_phys_alloc()
    352  for (i = 0; i < npages; i++)  in rd_phys_alloc()
    [all …]
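
rd_phys_alloc() is a clean example of staged allocation with reverse-order unwind: if page_create_wait() fails it undoes page_resv() with page_unresv(); if the later array allocation fails it calls page_create_putback() and then page_unresv(). A generic sketch of that shape, with hypothetical stubs standing in for the kernel calls:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for page_resv()/page_create_wait() etc. */
static int  reserve(long n)        { (void) n; return (1); }
static void unreserve(long n)      { (void) n; }
static int  create_wait(long n)    { (void) n; return (1); }
static void create_putback(long n) { (void) n; }

static void *
phys_alloc(long npages)
{
    void *ppa;

    if (!reserve(npages))               /* cf. page_resv() */
        return (NULL);
    if (!create_wait(npages))           /* cf. page_create_wait() */
        goto out_unreserve;
    if ((ppa = calloc(npages, sizeof (void *))) == NULL)
        goto out_putback;               /* cf. kmem_zalloc() failing */
    return (ppa);

out_putback:
    create_putback(npages);             /* undo create_wait() */
out_unreserve:
    unreserve(npages);                  /* undo reserve() */
    return (NULL);
}

int
main(void)
{
    void *p = phys_alloc(16);

    printf("phys_alloc: %s\n", p != NULL ? "ok" : "failed");
    free(p);
    return (0);
}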

/titanic_44/usr/src/uts/sun4u/serengeti/io/

sbdp_cpu.c
    354  pgcnt_t npages;  (member)
    387  cpusram_map(&map.vaddr, &map.npages) != DDI_SUCCESS) {  in sbdp_cpu_poweroff()
    400  cpusram_unmap(&map.vaddr, map.npages);  in sbdp_cpu_poweroff()
    735  pgcnt_t npages;  in cpusram_map() (local)
    751  npages = mmu_btopr(SBDP_CPU_SRAM_SIZE + pgoffset);  in cpusram_map()
    753  kaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);  in cpusram_map()
    761  hat_devload(kas.a_hat, kaddr, ptob(npages), pfn, mapping_attr,  in cpusram_map()
    765  *npp = npages;  in cpusram_map()
    771  cpusram_unmap(caddr_t *vaddrp, pgcnt_t npages)  in cpusram_unmap() (argument)
    780  hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);  in cpusram_unmap()
    [all …]

/titanic_44/usr/src/uts/i86pc/io/

immu_dvma.c
    934  uint64_t npages;  in map_unity_domain() (local)
    964  npages = mp->ml_size/IMMU_PAGESIZE + 1;  in map_unity_domain()
    967  dcookies[0].dck_npages = npages;  in map_unity_domain()
    969  (void) dvma_map(domain, start, npages, dcookies,  in map_unity_domain()
    982  npages = mp->ml_size/IMMU_PAGESIZE + 1;  in map_unity_domain()
    985  dcookies[0].dck_npages = npages;  in map_unity_domain()
    987  (void) dvma_map(domain, start, npages,  in map_unity_domain()
    999  npages = mp->ml_size/IMMU_PAGESIZE + 1;  in map_unity_domain()
   1002  dcookies[0].dck_npages = npages;  in map_unity_domain()
   1004  (void) dvma_map(domain, start, npages,  in map_unity_domain()
    [all …]

/titanic_44/usr/src/uts/common/os/

bp_map.c
     94  pgcnt_t npages;  in bp_mapin_common() (local)
    109  npages = btop(size);  in bp_mapin_common()
    112  if ((bp->b_flags & (B_SHADOW | B_PAGEIO)) && (npages == 1) &&  in bp_mapin_common()
    164  while (npages-- != 0) {  in bp_mapin_common()
    208  pgcnt_t npages;  in bp_mapout() (local)
    217  npages = btop(size);  in bp_mapout()
    221  if ((bp->b_flags & (B_SHADOW | B_PAGEIO)) && (npages == 1) &&  in bp_mapout()