Home
last modified time | relevance | path

Searched refs: granule (Results 1 – 24 of 24) sorted by relevance

/linux/tools/dma/
H A Ddma_map_benchmark.c32 int granule = 1; in main() local
57 granule = atoi(optarg); in main()
94 if (granule < 1 || granule > 1024) { in main()
112 map.granule = granule; in main()
120 threads, seconds, node, directions[dir], granule); in main()
/linux/include/linux/
H A Dio-pgtable.h41 void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
44 unsigned long iova, size_t granule, void *cookie);
286 size_t size, size_t granule) in io_pgtable_tlb_flush_walk() argument
289 iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); in io_pgtable_tlb_flush_walk()
295 size_t granule) in io_pgtable_tlb_add_page() argument
298 iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); in io_pgtable_tlb_add_page()
/linux/drivers/iommu/
H A Dio-pgtable-arm-selftests.c27 size_t granule, void *cookie) in dummy_tlb_flush() argument
34 unsigned long iova, size_t granule, in dummy_tlb_add_page() argument
37 dummy_tlb_flush(iova, granule, granule, cookie); in dummy_tlb_add_page()
H A Dmsm_iommu.c139 size_t granule, bool leaf, void *cookie) in __flush_iotlb_range() argument
159 iova += granule; in __flush_iotlb_range()
160 } while (temp_size -= granule); in __flush_iotlb_range()
171 size_t granule, void *cookie) in __flush_iotlb_walk() argument
173 __flush_iotlb_range(iova, size, granule, false, cookie); in __flush_iotlb_walk()
177 unsigned long iova, size_t granule, void *cookie) in __flush_iotlb_page() argument
179 __flush_iotlb_range(iova, granule, granule, true, cookie); in __flush_iotlb_page()
H A Diova.c38 init_iova_domain(struct iova_domain *iovad, unsigned long granule, in init_iova_domain() argument
46 BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule)); in init_iova_domain()
52 iovad->granule = granule; in init_iova_domain()
H A Dio-pgtable-arm.c875 unsigned long granule, page_sizes; in arm_lpae_restrict_pgsizes() local
886 granule = PAGE_SIZE; in arm_lpae_restrict_pgsizes()
888 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); in arm_lpae_restrict_pgsizes()
890 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); in arm_lpae_restrict_pgsizes()
892 granule = 0; in arm_lpae_restrict_pgsizes()
894 switch (granule) { in arm_lpae_restrict_pgsizes()
H A Dio-pgtable-arm-v7s.c786 size_t granule, void *cookie) in dummy_tlb_flush() argument
793 unsigned long iova, size_t granule, in dummy_tlb_add_page() argument
796 dummy_tlb_flush(iova, granule, granule, cookie); in dummy_tlb_add_page()
H A Dvirtio-iommu.c406 unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap); in viommu_domain_map_identity() local
408 iova = ALIGN(iova, granule); in viommu_domain_map_identity()
409 limit = ALIGN_DOWN(limit + 1, granule) - 1; in viommu_domain_map_identity()
412 u64 resv_start = ALIGN_DOWN(resv->start, granule); in viommu_domain_map_identity()
413 u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1; in viommu_domain_map_identity()
H A Ddma-iommu.c428 if (cookie->iovad.granule) { in iommu_put_dma_cookie()
491 start += iovad->granule; in cookie_init_hw_msi_region()
691 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
1189 * the IOMMU granule. Returns non-zero if either the start or end
1190 * address is not aligned to the granule boundary.
1884 size_t bounce_len = min(size, iovad->granule - iova_start_pad); in iommu_dma_iova_link_swiotlb()
2021 end - addr, iovad->granule - iova_start_pad); in __iommu_dma_iova_unlink()
2139 return domain->iova_cookie->iovad.granule; in cookie_msi_pages()
H A Dipmmu-vmsa.c317 size_t granule, void *cookie) in ipmmu_tlb_flush() argument
/linux/drivers/virt/coco/arm-cca-guest/
H A Darm-cca-guest.c28 phys_addr_t granule; member
64 info->result = rsi_attestation_token_continue(info->granule, in arm_cca_attestation_continue()
139 info.granule = (unsigned long)virt_to_phys(buf); in arm_cca_report_new()
/linux/drivers/iommu/arm/arm-smmu/
H A Dqcom_iommu.c148 size_t granule, bool leaf, void *cookie) in qcom_iommu_tlb_inv_range_nosync() argument
164 iova += granule; in qcom_iommu_tlb_inv_range_nosync()
165 } while (s -= granule); in qcom_iommu_tlb_inv_range_nosync()
170 size_t granule, void *cookie) in qcom_iommu_tlb_flush_walk() argument
172 qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie); in qcom_iommu_tlb_flush_walk()
177 unsigned long iova, size_t granule, in qcom_iommu_tlb_add_page() argument
180 qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); in qcom_iommu_tlb_add_page()
H A Darm-smmu.c282 size_t granule, void *cookie, int reg) in arm_smmu_tlb_inv_range_s1() argument
297 iova += granule; in arm_smmu_tlb_inv_range_s1()
298 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
304 iova += granule >> 12; in arm_smmu_tlb_inv_range_s1()
305 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
310 size_t granule, void *cookie, int reg) in arm_smmu_tlb_inv_range_s2() argument
325 iova += granule >> 12; in arm_smmu_tlb_inv_range_s2()
326 } while (size -= granule); in arm_smmu_tlb_inv_range_s2()
330 size_t granule, void *cookie) in arm_smmu_tlb_inv_walk_s1() argument
338 arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, in arm_smmu_tlb_inv_walk_s1()
[all …]
/linux/kernel/dma/
H A Dmap_benchmark.c39 int npages = map->bparam.granule; in map_benchmark_thread()
236 if (map->bparam.granule < 1 || map->bparam.granule > 1024) { in map_benchmark_ioctl()
/linux/arch/arm64/kvm/hyp/
H A Dpgtable.c34 u64 granule = kvm_granule_size(ctx->level); in kvm_block_mapping_supported() local
39 if (granule > (ctx->end - ctx->addr)) in kvm_block_mapping_supported()
42 if (!IS_ALIGNED(phys, granule)) in kvm_block_mapping_supported()
45 return IS_ALIGNED(ctx->addr, granule); in kvm_block_mapping_supported()
478 u64 granule = kvm_granule_size(ctx->level); in hyp_unmap_walker() local
495 if (ctx->end - ctx->addr < granule) in hyp_unmap_walker()
501 *unmapped += granule; in hyp_unmap_walker()
957 u64 granule = kvm_granule_size(ctx->level); in stage2_map_walker_try_leaf() local
1000 granule); in stage2_map_walker_try_leaf()
1004 mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule); in stage2_map_walker_try_leaf()
/linux/include/uapi/linux/
H A Dmap_benchmark.h31 __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ member
/linux/arch/powerpc/boot/dts/
H A Dmicrowatt.dts119 reservation-granule-size = <64>;
149 reservation-granule-size = <64>;
/linux/drivers/gpu/drm/msm/
H A Dmsm_iommu.c464 size_t granule, void *cookie) in msm_iommu_tlb_flush_walk() argument
474 pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie); in msm_iommu_tlb_flush_walk()
480 unsigned long iova, size_t granule, void *cookie) in msm_iommu_tlb_add_page() argument
/linux/Documentation/arch/arm64/
H A Dmemory-tagging-extension.rst19 allocation tag for each 16-byte granule in the physical address space.
197 4-bit tag per byte and correspond to a 16-byte MTE tag granule in the
200 **Note**: If ``addr`` is not aligned to a 16-byte granule, the kernel
247 in a byte. With the tag granule of 16 bytes, a 4K page requires 128
/linux/arch/arm64/kvm/hyp/nvhe/
H A Dmem_protect.c497 u64 granule; in host_stage2_adjust_range() local
518 granule = kvm_granule_size(level); in host_stage2_adjust_range()
519 cur.start = ALIGN_DOWN(addr, granule); in host_stage2_adjust_range()
520 cur.end = cur.start + granule; in host_stage2_adjust_range()
/linux/drivers/iommu/arm/arm-smmu-v3/
H A Darm-smmu-v3.c2331 size_t granule, in __arm_smmu_tlb_inv_range() argument
2336 size_t inv_range = granule; in __arm_smmu_tlb_inv_range()
2360 cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); in __arm_smmu_tlb_inv_range()
2401 size_t granule, bool leaf, in arm_smmu_tlb_inv_range_domain() argument
2418 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_domain()
2437 size_t granule, bool leaf, in arm_smmu_tlb_inv_range_asid() argument
2449 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_asid()
2453 unsigned long iova, size_t granule, in arm_smmu_tlb_inv_page_nosync() argument
2459 iommu_iotlb_gather_add_page(domain, gather, iova, granule); in arm_smmu_tlb_inv_page_nosync()
2463 size_t granule, void *cookie) in arm_smmu_tlb_inv_walk() argument
[all …]
/linux/Documentation/dev-tools/
H A Dkasan.rst253 Internally, KASAN tracks memory state separately for each memory granule, which
258 For Generic KASAN, the size of each memory granule is 8. The state of each
259 granule is encoded in one shadow byte. Those 8 bytes can be accessible,
/linux/drivers/gpu/drm/panfrost/
H A Dpanfrost_mmu.c539 static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, in mmu_tlb_flush_walk() argument
/linux/drivers/gpu/drm/panthor/
H A Dpanthor_mmu.c1601 static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) in mmu_tlb_flush_walk() argument