| /linux/arch/arm64/mm/ |
| hugetlbpage.c |
    83    pte_t *ptep, size_t *pgsize)    in find_num_contig() argument
    90    *pgsize = PAGE_SIZE;    in find_num_contig()
    95    *pgsize = PMD_SIZE;    in find_num_contig()
    101   static inline int num_contig_ptes(unsigned long size, size_t *pgsize)    in num_contig_ptes() argument
    105   *pgsize = size;    in num_contig_ptes()
    109   *pgsize = PMD_SIZE;    in num_contig_ptes()
    113   *pgsize = PAGE_SIZE;    in num_contig_ptes()
    126   size_t pgsize;    in huge_ptep_get() local
    132   ncontig = find_num_contig(mm, addr, ptep, &pgsize);    in huge_ptep_get()
    156   unsigned long pgsize,    in get_clear_contig() argument
    [all …]
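
The arm64 hits above revolve around find_num_contig()/num_contig_ptes(), which turn a huge-page size into a count of contiguous page-table entries plus the per-entry granule returned through *pgsize. The sketch below is only an illustration of that mapping, written as standalone C with assumed 4K-granule values (16 contiguous PTEs/PMDs); the constants and the helper name are assumptions here, not the kernel's definitions.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE_A      (4UL * 1024)          /* assumed 4K base granule */
#define CONT_PTE_SIZE_A  (16 * PAGE_SIZE_A)    /* 64K: 16 contiguous PTEs */
#define PMD_SIZE_A       (512 * PAGE_SIZE_A)   /* 2M: a single PMD */
#define CONT_PMD_SIZE_A  (16 * PMD_SIZE_A)     /* 32M: 16 contiguous PMDs */

/* Map a huge-page size to (entry count, per-entry granule). */
static int num_contig_ptes_model(unsigned long size, size_t *pgsize)
{
    if (size == CONT_PMD_SIZE_A) {
        *pgsize = PMD_SIZE_A;
        return 16;
    }
    if (size == PMD_SIZE_A) {
        *pgsize = PMD_SIZE_A;
        return 1;
    }
    if (size == CONT_PTE_SIZE_A) {
        *pgsize = PAGE_SIZE_A;
        return 16;
    }
    *pgsize = PAGE_SIZE_A;   /* fallback: plain base pages */
    return 1;
}

int main(void)
{
    size_t pgsize;
    int n = num_contig_ptes_model(CONT_PTE_SIZE_A, &pgsize);

    printf("64K huge page -> %d entries of %zu bytes\n", n, pgsize);
    return 0;
}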
|
| /linux/drivers/mtd/tests/ |
| torturetest.c |
    70    static int pgsize;    variable
    97    addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;    in check_eraseblock()
    98    len = pgcnt * pgsize;    in check_eraseblock()
    151   addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;    in write_pattern()
    152   len = pgcnt * pgsize;    in write_pattern()
    203   pgsize = 512;    in tort_init()
    205   pgsize = mtd->writesize;    in tort_init()
    207   if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {    in tort_init()
    235   for (i = 0; i < mtd->erasesize / pgsize; i++) {    in tort_init()
    237   memset(patt_5A5 + i * pgsize, 0x55, pgsize);    in tort_init()
    [all …]
|
| speedtest.c |
    37    static int pgsize;    variable
    77    err = mtdtest_write(mtd, addr, pgsize, buf);    in write_eraseblock_by_page()
    80    addr += pgsize;    in write_eraseblock_by_page()
    81    buf += pgsize;    in write_eraseblock_by_page()
    89    size_t sz = pgsize * 2;    in write_eraseblock_by_2pages()
    102   err = mtdtest_write(mtd, addr, pgsize, buf);    in write_eraseblock_by_2pages()
    121   err = mtdtest_read(mtd, addr, pgsize, buf);    in read_eraseblock_by_page()
    124   addr += pgsize;    in read_eraseblock_by_page()
    125   buf += pgsize;    in read_eraseblock_by_page()
    133   size_t sz = pgsize * 2;    in read_eraseblock_by_2pages()
    [all …]
|
| readtest.c |
    31    static int pgsize;    variable
    43    memset(buf, 0 , pgsize);    in read_eraseblock_by_page()
    44    ret = mtdtest_read(mtd, addr, pgsize, buf);    in read_eraseblock_by_page()
    72    addr += pgsize;    in read_eraseblock_by_page()
    73    buf += pgsize;    in read_eraseblock_by_page()
    138   pgsize = 512;    in mtd_readtest_init()
    140   pgsize = mtd->writesize;    in mtd_readtest_init()
    145   pgcnt = mtd->erasesize / pgsize;    in mtd_readtest_init()
    151   pgsize, ebcnt, pgcnt, mtd->oobsize);    in mtd_readtest_init()
|
| stresstest.c |
    38    static int pgsize;    variable
    95    len = ((len + pgsize - 1) / pgsize) * pgsize;    in do_write()
    154   pgsize = 512;    in mtd_stresstest_init()
    156   pgsize = mtd->writesize;    in mtd_stresstest_init()
    161   pgcnt = mtd->erasesize / pgsize;    in mtd_stresstest_init()
    167   pgsize, ebcnt, pgcnt, mtd->oobsize);    in mtd_stresstest_init()
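
The four mtd test modules listed above derive their page geometry the same way: if the device reports writesize == 1 (NOR-style flash with no real pages) they fall back to a 512-byte page, otherwise they use mtd->writesize, and each erase block then holds erasesize / pgsize pages. A minimal standalone sketch of that derivation, with a hypothetical struct standing in for the few mtd_info fields involved:

#include <stdio.h>

struct mtd_geom {                 /* stand-in for the mtd_info fields used */
    unsigned int writesize;       /* write page size; 1 means "no pages" */
    unsigned int erasesize;       /* erase block size in bytes */
};

static void derive_page_geometry(const struct mtd_geom *mtd,
                                 int *pgsize, int *pgcnt)
{
    /* NOR-style devices report writesize == 1, so assume 512-byte pages. */
    *pgsize = (mtd->writesize == 1) ? 512 : (int)mtd->writesize;
    *pgcnt = mtd->erasesize / *pgsize;
}

int main(void)
{
    struct mtd_geom nand = { .writesize = 2048, .erasesize = 128 * 1024 };
    int pgsize, pgcnt;

    derive_page_geometry(&nand, &pgsize, &pgcnt);
    printf("pgsize=%d pgcnt=%d\n", pgsize, pgcnt);   /* 2048, 64 */
    return 0;
}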
|
| /linux/tools/testing/selftests/powerpc/tm/ |
| tm-vmxcopy.c |
    39    unsigned long pgsize = getpagesize();    in test_vmxcopy() local
    42    int size = pgsize*16;    in test_vmxcopy()
    44    char buf[pgsize];    in test_vmxcopy()
    55    memset(buf, 0, pgsize);    in test_vmxcopy()
    56    for (i = 0; i < size; i += pgsize)    in test_vmxcopy()
    57    assert(write(fd, buf, pgsize) == pgsize);    in test_vmxcopy()
|
| /linux/tools/testing/selftests/powerpc/mm/ |
| exec_prot.c |
    29    static unsigned long pgsize, numinsns;    variable
    73    if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE | PROT_EXEC)) {    in segv_handler()
    98    FAIL_IF(mprotect(insns, pgsize, rights) != 0);    in check_exec_fault()
    136   pgsize = getpagesize();    in test()
    137   numinsns = pgsize / sizeof(unsigned int);    in test()
    138   insns = (unsigned int *)mmap(NULL, pgsize, PROT_READ | PROT_WRITE,    in test()
    173   FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0);    in test()
    185   FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0);    in test()
    223   FAIL_IF(munmap((void *)insns, pgsize));    in test()
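
The two powerpc selftests above share the userspace idiom of sizing everything with getpagesize() and then operating on whole pages with mmap()/mprotect(). A minimal, self-contained sketch of that idiom (an illustration only, not the selftests themselves):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    unsigned long pgsize = getpagesize();
    unsigned int *insns;

    /* One page, initially mapped read/write. */
    insns = mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (insns == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* Fill the page while it is still writable. */
    memset(insns, 0, pgsize);

    /* Flip the whole page to read/execute, dropping write permission. */
    if (mprotect(insns, pgsize, PROT_READ | PROT_EXEC)) {
        perror("mprotect");
        return 1;
    }

    munmap(insns, pgsize);
    return 0;
}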
|
| /linux/arch/riscv/mm/ |
| hugetlbpage.c |
    206   unsigned long pgsize,    in clear_flush() argument
    212   for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)    in clear_flush()
    218   static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize)    in num_contig_ptes_from_size() argument
    233   *pgsize = 1 << hugepage_shift;    in num_contig_ptes_from_size()
    252   size_t pgsize;    in set_huge_pte_at() local
    255   pte_num = num_contig_ptes_from_size(sz, &pgsize);    in set_huge_pte_at()
    258   for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)    in set_huge_pte_at()
    268   clear_flush(mm, addr, ptep, pgsize, pte_num);    in set_huge_pte_at()
    270   for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)    in set_huge_pte_at()
    309   size_t pgsize;    in huge_ptep_get_and_clear() local
    [all …]
|
| /linux/drivers/iommu/ |
| io-pgtable-arm-selftests.c |
    156   static const unsigned long pgsize[] = {    in arm_lpae_do_selftests() local
    181   for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {    in arm_lpae_do_selftests()
    185   cfg.pgsize_bitmap = pgsize[i];    in arm_lpae_do_selftests()
    189   pgsize[i], cfg.ias, cfg.oas);    in arm_lpae_do_selftests()
|
| apple-dart.c |
    221    u32 pgsize;    member
    544    phys_addr_t paddr, size_t pgsize,    in apple_dart_map_pages() argument
    554    return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp,    in apple_dart_map_pages()
    559    unsigned long iova, size_t pgsize,    in apple_dart_unmap_pages() argument
    566    return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);    in apple_dart_unmap_pages()
    596    if (dart->pgsize > PAGE_SIZE)    in apple_dart_finalize_domain()
    612    .pgsize_bitmap = dart->pgsize,    in apple_dart_finalize_domain()
    825    if (cfg_dart->pgsize != dart->pgsize)    in apple_dart_of_xlate()
    970    if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE)    in apple_dart_def_domain_type()
    1145   dart->pgsize = 1 << FIELD_GET(DART_PARAMS1_PAGE_SHIFT, dart_params[0]);    in apple_dart_probe()
    [all …]
|
| io-pgtable-arm.c |
    550   phys_addr_t paddr, size_t pgsize, size_t pgcount,    in arm_lpae_map_pages() argument
    560   if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))    in arm_lpae_map_pages()
    572   ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,    in arm_lpae_map_pages()
    685   size_t pgsize, size_t pgcount,    in arm_lpae_unmap_pages() argument
    693   if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))    in arm_lpae_unmap_pages()
    701   return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,    in arm_lpae_unmap_pages()
|
| s390-iommu.c |
    914    size_t pgsize, size_t pgcount,    in s390_iommu_map_pages() argument
    918    size_t size = pgcount << __ffs(pgsize);    in s390_iommu_map_pages()
    921    if (pgsize != SZ_4K)    in s390_iommu_map_pages()
    928    if (!IS_ALIGNED(iova | paddr, pgsize))    in s390_iommu_map_pages()
    1027   size_t pgsize, size_t pgcount,    in s390_iommu_unmap_pages() argument
    1031   size_t size = pgcount << __ffs(pgsize);    in s390_iommu_unmap_pages()
|
| msm_iommu.c |
    487   phys_addr_t pa, size_t pgsize, size_t pgcount,    in msm_iommu_map() argument
    495   ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,    in msm_iommu_map()
    512   size_t pgsize, size_t pgcount,    in msm_iommu_unmap() argument
    520   ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);    in msm_iommu_unmap()
|
| /linux/drivers/gpu/drm/msm/ |
| msm_iommu.c |
    52    size_t offset, pgsize, pgsize_next;    in calc_pgsize() local
    67    pgsize = BIT(pgsize_idx);    in calc_pgsize()
    69    return pgsize;    in calc_pgsize()
    98    return pgsize;    in calc_pgsize()
    109   size_t pgsize, count;    in msm_iommu_pagetable_unmap() local
    112   pgsize = calc_pgsize(pagetable, iova, iova, size, &count);    in msm_iommu_pagetable_unmap()
    114   unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);    in msm_iommu_pagetable_unmap()
    195   size_t pgsize, count, mapped = 0;    in msm_iommu_pagetable_map() local
    198   pgsize = calc_pgsize(pagetable, addr, phys, size, &count);    in msm_iommu_pagetable_map()
    200   ret = ops->map_pages(ops, addr, phys, pgsize, count,    in msm_iommu_pagetable_map()
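
calc_pgsize() in the msm driver follows the common IOMMU pattern of picking, for each step of a map or unmap, the largest supported page size that fits both the remaining length and the combined IOVA/physical alignment, along with how many such pages one ops->map_pages()/unmap_pages() call can cover. The sketch below is a simplified standalone model of that selection; pick_pgsize() and mask_upto() are hypothetical helpers, and the real code additionally ends the run where a larger page size would become usable.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bits 0..bit, inclusive. */
static unsigned long mask_upto(unsigned int bit)
{
    return (bit >= 63) ? ~0UL : ((1UL << (bit + 1)) - 1);
}

/* Assumes size != 0 and that at least the base page size always survives. */
static size_t pick_pgsize(unsigned long pgsize_bitmap, uint64_t iova,
                          uint64_t paddr, size_t size, size_t *count)
{
    uint64_t addr_merge = iova | paddr;
    unsigned long sizes;
    unsigned int idx;

    /* Supported sizes no larger than the remaining length... */
    sizes = pgsize_bitmap &
            mask_upto(63 - __builtin_clzll((unsigned long long)size));

    /* ...and no larger than the combined iova/paddr alignment allows. */
    if (addr_merge)
        sizes &= mask_upto(__builtin_ctzll(addr_merge));

    idx = 63 - __builtin_clzll((unsigned long long)sizes); /* largest survivor */
    *count = size >> idx;   /* simplification: ignores the next-size boundary */
    return (size_t)1 << idx;
}

int main(void)
{
    size_t count;
    /* 4K and 2M supported; a 6M run starting at 2M-aligned iova/paddr. */
    size_t pgsize = pick_pgsize((1UL << 12) | (1UL << 21),
                                0x200000, 0x40200000, (size_t)6 << 20, &count);

    printf("pgsize=%zu count=%zu\n", pgsize, count);   /* 2097152, 3 */
    return 0;
}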
|
| /linux/drivers/vfio/ |
| vfio_iommu_type1.c |
    242    static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)    in vfio_dma_bitmap_alloc() argument
    244    uint64_t npages = dma->size / pgsize;    in vfio_dma_bitmap_alloc()
    268    static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)    in vfio_dma_populate_bitmap() argument
    271    unsigned long pgshift = __ffs(pgsize);    in vfio_dma_populate_bitmap()
    292    static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)    in vfio_dma_bitmap_alloc_all() argument
    300    ret = vfio_dma_bitmap_alloc(dma, pgsize);    in vfio_dma_bitmap_alloc_all()
    312    vfio_dma_populate_bitmap(dma, pgsize);    in vfio_dma_bitmap_alloc_all()
    1273   size_t pgsize)    in update_user_bitmap() argument
    1275   unsigned long pgshift = __ffs(pgsize);    in update_user_bitmap()
    1309   dma_addr_t iova, dma_addr_t iova_end, size_t pgsize)    in vfio_iova_dirty_bitmap() argument
    [all …]
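
The vfio hits above size and fill a dirty-page bitmap at pgsize granularity: one bit per page of the DMA mapping, addressed through pgshift = __ffs(pgsize). A back-of-the-envelope model of the sizing only (a hypothetical helper, not the vfio_iommu_type1 implementation):

#include <stdint.h>
#include <stdio.h>

static uint64_t dirty_bitmap_bytes(uint64_t dma_size, uint64_t pgsize)
{
    uint64_t npages = dma_size / pgsize;   /* one dirty bit per page */
    uint64_t words = (npages + 63) / 64;   /* round up to whole 64-bit words */

    return words * 8;
}

int main(void)
{
    /* A 1 GiB mapping tracked at 4 KiB granularity needs 32 KiB of bitmap. */
    printf("%llu bytes\n",
           (unsigned long long)dirty_bitmap_bytes(1ULL << 30, 4096));
    return 0;
}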
|
| /linux/drivers/infiniband/hw/bng_re/ |
| bng_res.c |
    89    pbl->pg_size = sginfo->pgsize;    in bng_alloc_pbl()
    141   pg_size = hwq_attr->sginfo->pgsize;    in bng_re_alloc_init_hwq()
    174   sginfo.pgsize = npde * pg_size;    in bng_re_alloc_init_hwq()
    182   sginfo.pgsize = PAGE_SIZE;    in bng_re_alloc_init_hwq()
    227   sginfo.pgsize = PAGE_SIZE;    in bng_re_alloc_init_hwq()
|
| /linux/arch/powerpc/kvm/ |
| book3s_64_mmu.c |
    206   int pgsize;    in kvmppc_mmu_book3s_64_xlate() local
    242   pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;    in kvmppc_mmu_book3s_64_xlate()
    271   pgsize = decode_pagesize(slbe, pte1);    in kvmppc_mmu_book3s_64_xlate()
    272   if (pgsize < 0)    in kvmppc_mmu_book3s_64_xlate()
    296   eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;    in kvmppc_mmu_book3s_64_xlate()
    298   gpte->page_size = pgsize;    in kvmppc_mmu_book3s_64_xlate()
|
| e500.h |
    165   unsigned int pgsize = get_tlb_size(tlbe);    in get_tlb_bytes() local
    166   return 1ULL << 10 << pgsize;    in get_tlb_bytes()
|
| /linux/include/linux/generic_pt/ |
| iommu.h |
    199   size_t pgsize, size_t pgcount, \
    203   size_t pgsize, size_t pgcount, \
|
| /linux/include/linux/ |
| io-pgtable.h |
    212   phys_addr_t paddr, size_t pgsize, size_t pgcount,
    215   size_t pgsize, size_t pgcount,
|
| iommu.h |
    361    size_t pgsize;    member
    760    phys_addr_t paddr, size_t pgsize, size_t pgcount,
    763    size_t pgsize, size_t pgcount,
    1051   if ((gather->pgsize && gather->pgsize != size) ||    in iommu_iotlb_gather_add_page()
    1055   gather->pgsize = size;    in iommu_iotlb_gather_add_page()
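
The iommu_iotlb_gather_add_page() lines show how gather->pgsize is used: TLB invalidations are batched only while the page size stays the same and the new page touches the current window; otherwise the pending batch is flushed first. Below is a simplified userspace model of that behaviour; the struct and helpers are illustrative stand-ins, not the kernel API.

#include <stdint.h>
#include <stdio.h>

struct gather_model {
    uint64_t start, end;   /* inclusive batched range; end == 0 means empty */
    uint64_t pgsize;       /* page size of the batched entries */
};

static void flush_model(struct gather_model *g)
{
    if (g->end)
        printf("flush [%#llx, %#llx] pgsize=%llu\n",
               (unsigned long long)g->start,
               (unsigned long long)g->end,
               (unsigned long long)g->pgsize);
    g->start = ~0ULL;
    g->end = 0;
    g->pgsize = 0;
}

static void gather_add_page(struct gather_model *g, uint64_t iova, uint64_t size)
{
    uint64_t end = iova + size - 1;
    int disjoint = g->end && (end + 1 < g->start || iova > g->end + 1);

    /* A different page size or a disjoint range forces a flush first. */
    if ((g->pgsize && g->pgsize != size) || disjoint)
        flush_model(g);

    g->pgsize = size;
    if (iova < g->start)
        g->start = iova;
    if (end > g->end)
        g->end = end;
}

int main(void)
{
    struct gather_model g = { .start = ~0ULL, .end = 0, .pgsize = 0 };

    gather_add_page(&g, 0x1000, 0x1000);
    gather_add_page(&g, 0x2000, 0x1000);     /* adjacent, same size: batched */
    gather_add_page(&g, 0x200000, 0x200000); /* new size: flushes the batch */
    flush_model(&g);
    return 0;
}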
|
| /linux/drivers/iommu/riscv/ |
| iommu.c |
    1116   unsigned long iova, size_t pgsize,    in riscv_iommu_pte_alloc() argument
    1133   if (((size_t)1 << shift) == pgsize)    in riscv_iommu_pte_alloc()
    1192   size_t pgsize, size_t pgcount, int prot,    in riscv_iommu_map_pages() argument
    1210   ptr = riscv_iommu_pte_alloc(domain, iova, pgsize, gfp);    in riscv_iommu_map_pages()
    1223   size += pgsize;    in riscv_iommu_map_pages()
    1224   iova += pgsize;    in riscv_iommu_map_pages()
    1225   phys += pgsize;    in riscv_iommu_map_pages()
    1247   unsigned long iova, size_t pgsize,    in riscv_iommu_unmap_pages() argument
    1252   size_t size = pgcount << __ffs(pgsize);    in riscv_iommu_unmap_pages()
|
| /linux/drivers/s390/char/ |
| sclp_diag.h |
    51   u8 pgsize;    member
|
| /linux/drivers/gpu/drm/panthor/ |
| panthor_mmu.c |
    912   size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);    in panthor_vm_unmap_pages() local
    914   unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);    in panthor_vm_unmap_pages()
    916   if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {    in panthor_vm_unmap_pages()
    919   iova + offset + pgsize * pgcount,    in panthor_vm_unmap_pages()
    928   unmapped_sz / pgsize, pgsize);    in panthor_vm_unmap_pages()
    967   size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);    in panthor_vm_map_pages() local
    969   ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,    in panthor_vm_map_pages()
    975   mapped / pgsize, pgsize);    in panthor_vm_map_pages()
|
| /linux/drivers/iommu/arm/arm-smmu/ |
| qcom_iommu.c |
    430   phys_addr_t paddr, size_t pgsize, size_t pgcount,    in qcom_iommu_map() argument
    442   ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped);    in qcom_iommu_map()
    448   size_t pgsize, size_t pgcount,    in qcom_iommu_unmap() argument
    466   ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);    in qcom_iommu_unmap()
|