| /linux/drivers/net/ethernet/mellanox/mlxsw/ |
| spectrum_pgt.c |
    32   mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc()
    33   index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,  in mlxsw_sp_pgt_mid_alloc()
    34       mlxsw_sp->pgt->end_index, GFP_KERNEL);  in mlxsw_sp_pgt_mid_alloc()
    42   mutex_unlock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc()
    46   mutex_unlock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc()
    52   mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_free()
    53   WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));  in mlxsw_sp_pgt_mid_free()
    54   mutex_unlock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_free()
    63   mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc_range()
    65   mid_base = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);  in mlxsw_sp_pgt_mid_alloc_range()
    [all …]
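
The spectrum_pgt.c matches above all follow one allocator pattern: an IDR hands out PGT multicast-ID indices and a mutex serializes access to it. Below is a minimal kernel-style sketch of that pattern, not the driver's code; struct my_pgt and the my_pgt_* helpers are hypothetical names, and only the allocate/free path visible in the matches is reproduced.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical stand-in for the driver's PGT bookkeeping state. */
struct my_pgt {
	struct idr pgt_idr;	/* reserved MID indices; no payload stored */
	u16 end_index;		/* one past the highest allocatable index */
	struct mutex lock;	/* serializes pgt_idr users */
};

static void my_pgt_init(struct my_pgt *pgt, u16 end_index)
{
	idr_init(&pgt->pgt_idr);
	mutex_init(&pgt->lock);
	pgt->end_index = end_index;
}

/* Reserve any free index in [0, end_index); returns 0 or a negative errno. */
static int my_pgt_mid_alloc(struct my_pgt *pgt, u16 *p_mid)
{
	int index;

	mutex_lock(&pgt->lock);
	index = idr_alloc(&pgt->pgt_idr, NULL, 0, pgt->end_index, GFP_KERNEL);
	mutex_unlock(&pgt->lock);
	if (index < 0)
		return index;

	*p_mid = index;
	return 0;
}

static void my_pgt_mid_free(struct my_pgt *pgt, u16 mid)
{
	mutex_lock(&pgt->lock);
	idr_remove(&pgt->pgt_idr, mid);
	mutex_unlock(&pgt->lock);
}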
|
| /linux/arch/arm64/kvm/hyp/ |
| pgtable.c |
    56   static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)  in kvm_pgd_page_idx() argument
    58   u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */  in kvm_pgd_page_idx()
    59   u64 mask = BIT(pgt->ia_bits) - 1;  in kvm_pgd_page_idx()
    66   struct kvm_pgtable pgt = {  in kvm_pgd_pages() local
    71   return kvm_pgd_page_idx(&pgt, -1ULL) + 1;  in kvm_pgd_pages()
    245  static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)  in _kvm_pgtable_walk() argument
    249  u64 limit = BIT(pgt->ia_bits);  in _kvm_pgtable_walk()
    254  if (!pgt->pgd)  in _kvm_pgtable_walk()
    257  for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {  in _kvm_pgtable_walk()
    258  kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];  in _kvm_pgtable_walk()
    [all …]
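
kvm_pgd_page_idx() above selects which of the (possibly concatenated) start-level tables an address falls in: mask the address to ia_bits, then shift by the granule shift of the level above start_level; kvm_pgd_pages() feeds it the highest address to get the table count. A plain-C sketch of that arithmetic, assuming a 4K granule with levels 0..3 (granule_shift() here is an illustrative stand-in for the kernel's kvm_granule_shift()):

#include <stdint.h>
#include <stdio.h>

/* 4K granule: level 3 resolves bits 12 and up, each higher level adds 9 bits. */
static uint64_t granule_shift(int level)
{
	return 12 + 9 * (3 - level);
}

static uint64_t pgd_page_idx(uint32_t ia_bits, int start_level, uint64_t addr)
{
	uint64_t shift = granule_shift(start_level - 1);
	uint64_t mask = (1ULL << ia_bits) - 1;

	return (addr & mask) >> shift;
}

int main(void)
{
	/* A 40-bit IPA space starting at level 1 needs two concatenated tables. */
	printf("pgd pages: %llu\n",
	       (unsigned long long)(pgd_page_idx(40, 1, ~0ULL) + 1));
	return 0;
}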
|
| /linux/arch/arm64/kvm/ |
| pkvm.c |
    316  int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,  in pkvm_pgtable_stage2_init() argument
    319  pgt->pkvm_mappings = RB_ROOT_CACHED;  in pkvm_pgtable_stage2_init()
    320  pgt->mmu = mmu;  in pkvm_pgtable_stage2_init()
    325  static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 end)  in __pkvm_pgtable_stage2_unmap() argument
    327  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);  in __pkvm_pgtable_stage2_unmap()
    335  for_each_mapping_in_range_safe(pgt, start, end, mapping) {  in __pkvm_pgtable_stage2_unmap()
    340  pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);  in __pkvm_pgtable_stage2_unmap()
    347  void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,  in pkvm_pgtable_stage2_destroy_range() argument
    350  __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);  in pkvm_pgtable_stage2_destroy_range()
    353  void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)  in pkvm_pgtable_stage2_destroy_pgd() argument
    [all …]
|
| mmu.c |
    72   struct kvm_pgtable *pgt = mmu->pgt;  in stage2_apply_range() local
    73   if (!pgt)  in stage2_apply_range()
    77   ret = fn(pgt, addr, next - addr);  in stage2_apply_range()
    124  struct kvm_pgtable *pgt;  in kvm_mmu_split_huge_pages() local
    151  pgt = kvm->arch.mmu.pgt;  in kvm_mmu_split_huge_pages()
    152  if (!pgt)  in kvm_mmu_split_huge_pages()
    156  ret = KVM_PGT_FN(kvm_pgtable_stage2_split)(pgt, addr, next - addr, cache);  in kvm_mmu_split_huge_pages()
    821  struct kvm_pgtable pgt = {  in get_user_mapping_size() local
    825  ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits) + 1),  in get_user_mapping_size()
    839  ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);  in get_user_mapping_size()
    [all …]
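
stage2_apply_range() and kvm_mmu_split_huge_pages() above both walk an IPA range one block at a time and hand each chunk to a callback, so locks can be dropped or the CPU rescheduled between chunks. A generic, standalone-C sketch of that chunking pattern (apply_range(), block_end() and the 2MiB chunk size are illustrative choices, not the kernel's):

#include <errno.h>
#include <stdint.h>

#define CHUNK_SIZE	(1ULL << 21)	/* process one 2MiB-aligned block per pass */

typedef int (*range_fn)(void *pgt, uint64_t addr, uint64_t size);

/* End of the current chunk: next CHUNK_SIZE boundary, capped at the range end. */
static uint64_t block_end(uint64_t addr, uint64_t end)
{
	uint64_t next = (addr + CHUNK_SIZE) & ~(CHUNK_SIZE - 1);

	return next < end ? next : end;
}

static int apply_range(void *pgt, uint64_t addr, uint64_t end, range_fn fn)
{
	int ret = 0;

	if (!pgt)		/* no page table installed yet */
		return -EINVAL;

	while (addr < end) {
		uint64_t next = block_end(addr, end);

		ret = fn(pgt, addr, next - addr);
		if (ret)
			break;
		/* a real implementation may release locks or reschedule here */
		addr = next;
	}
	return ret;
}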
|
| ptdump.c |
    119  struct kvm_pgtable *pgtable = mmu->pgt;  in kvm_ptdump_parser_create()
    164  ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);  in kvm_ptdump_guest_show()
    239  pgtable = kvm->arch.mmu.pgt;  in kvm_pgtable_debugfs_open()
|
| nested.c |
    106   kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];  in kvm_vcpu_init_nested()
    557   if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL))  in get_guest_mapping_ttl()
    1111  kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));  in kvm_nested_s2_wp()
    1127  kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));  in kvm_nested_s2_unmap()
|
| /linux/arch/arm64/include/asm/ |
| kvm_pgtable.h |
    459  int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
    469  void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
    490  int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
    512  u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
    549  int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
    554  static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,  in kvm_pgtable_stage2_init() argument
    557  return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);  in kvm_pgtable_stage2_init()
    567  void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
    578  void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
    587  void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
    [all …]
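
The truncated prototypes above cover the hypervisor ("hyp") half of the kvm_pgtable API: initialize a table with an address-space size and allocator callbacks, map/unmap a VA range, then destroy it. A hedged usage sketch follows; it assumes the elided parameters are a struct kvm_pgtable_mm_ops pointer for kvm_pgtable_hyp_init() and an enum kvm_pgtable_prot for kvm_pgtable_hyp_map(), and the 48-bit VA size and R+W protection are arbitrary example values, not requirements of the API.

#include <asm/kvm_pgtable.h>

static int hyp_map_range_example(struct kvm_pgtable_mm_ops *mm_ops,
				 u64 va, u64 phys, u64 size)
{
	struct kvm_pgtable pgt;
	int ret;

	ret = kvm_pgtable_hyp_init(&pgt, 48, mm_ops);	/* 48-bit hyp VA space */
	if (ret)
		return ret;

	ret = kvm_pgtable_hyp_map(&pgt, va, size, phys,
				  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
	if (ret)
		goto out;

	/* ... use the mapping ... */

	kvm_pgtable_hyp_unmap(&pgt, va, size);	/* returns the number of bytes unmapped */
out:
	kvm_pgtable_hyp_destroy(&pgt);
	return ret;
}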
|
| kvm_pkvm.h |
    181  int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
    183  void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
    185  void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
    186  int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
    189  int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
    190  int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
    191  int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
    192  bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
    193  int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
    195  void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
    [all …]
|
| kvm_host.h |
    167  struct kvm_pgtable *pgt;  member
|
| /linux/drivers/gpu/drm/nouveau/nvkm/engine/dma/ |
| usernv04.c |
    52  struct nvkm_memory *pgt =  in nv04_dmaobj_bind() local
    55  return nvkm_gpuobj_wrap(pgt, pgpuobj);  in nv04_dmaobj_bind()
    56  nvkm_kmap(pgt);  in nv04_dmaobj_bind()
    57  offset = nvkm_ro32(pgt, 8 + (offset >> 10));  in nv04_dmaobj_bind()
    59  nvkm_done(pgt);  in nv04_dmaobj_bind()
|
| /linux/arch/arm64/kvm/hyp/nvhe/ |
| mem_protect.c |
    156  ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,  in kvm_host_prepare_stage2()
    162  mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);  in kvm_host_prepare_stage2()
    163  mmu->pgt = &host_mmu.pgt;  in kvm_host_prepare_stage2()
    283  ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);  in kvm_guest_prepare_stage2()
    288  vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);  in kvm_guest_prepare_stage2()
    300  kvm_pgtable_stage2_destroy(&vm->pgt);  in reclaim_pgtable_pages()
    355  struct kvm_pgtable *pgt = &host_mmu.pgt;  in host_stage2_unmap_dev_all() local
    363  ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);  in host_stage2_unmap_dev_all()
    367  return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);  in host_stage2_unmap_dev_all()
    462  return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,  in __host_stage2_idmap()
    [all …]
|
| pkvm.c |
    436  mmu->pgt = &hyp_vm->pgt;  in init_pkvm_hyp_vm()
|
| /linux/arch/s390/kvm/ |
| gaccess.c |
    1200  unsigned long *pgt, int *dat_protection,  in kvm_s390_shadow_tables() argument
    1256  *pgt = ptr + vaddr.rfx * 8;  in kvm_s390_shadow_tables()
    1284  *pgt = ptr + vaddr.rsx * 8;  in kvm_s390_shadow_tables()
    1313  *pgt = ptr + vaddr.rtx * 8;  in kvm_s390_shadow_tables()
    1351  *pgt = ptr + vaddr.sx * 8;  in kvm_s390_shadow_tables()
    1378  *pgt = ptr;  in kvm_s390_shadow_tables()
    1395  static int shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,  in shadow_pgt_lookup() argument
    1409  *pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE;  in shadow_pgt_lookup()
    1439  unsigned long pgt = 0;  in kvm_s390_shadow_fault() local
    1454  rc = shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);  in kvm_s390_shadow_fault()
    [all …]
|
| /linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
| vmmnv50.c |
    106  nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)  in nv50_vmm_pde() argument
    110  if (pgt && (pt = pgt->pt[0])) {  in nv50_vmm_pde()
    111  switch (pgt->page) {  in nv50_vmm_pde()
|
| vmmgf100.c |
    108  struct nvkm_vmm_pt *pgt = pgd->pde[pdei];  in gf100_vmm_pgd_pde() local
    113  if ((pt = pgt->pt[0])) {  in gf100_vmm_pgd_pde()
    127  if ((pt = pgt->pt[1])) {  in gf100_vmm_pgd_pde()
|
| /linux/arch/s390/mm/ |
| gmap.c |
    1178  unsigned long *pgt)  in __gmap_unshadow_pgt() argument
    1184  pgt[i] = _PAGE_INVALID;  in __gmap_unshadow_pgt()
    1197  phys_addr_t sto, pgt;  in gmap_unshadow_pgt() local
    1207  pgt = *ste & _SEGMENT_ENTRY_ORIGIN;  in gmap_unshadow_pgt()
    1209  __gmap_unshadow_pgt(sg, raddr, __va(pgt));  in gmap_unshadow_pgt()
    1211  ptdesc = page_ptdesc(phys_to_page(pgt));  in gmap_unshadow_pgt()
    1227  phys_addr_t pgt;  in __gmap_unshadow_sgt() local
    1234  pgt = sgt[i] & _REGION_ENTRY_ORIGIN;  in __gmap_unshadow_sgt()
    1236  __gmap_unshadow_pgt(sg, raddr, __va(pgt));  in __gmap_unshadow_sgt()
    1238  ptdesc = page_ptdesc(phys_to_page(pgt));  in __gmap_unshadow_sgt()
    [all …]
|
| /linux/arch/arm64/kvm/hyp/include/nvhe/ |
| mem_protect.h |
    20  struct kvm_pgtable pgt;  member
|
| /linux/drivers/accel/habanalabs/common/mmu/ |
| mmu.c |
    953  u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,  in hl_mmu_hr_pte_phys_to_virt() argument
    959  return pgt->virt_addr + pte_offset;  in hl_mmu_hr_pte_phys_to_virt()
|
| /linux/arch/s390/include/asm/ |
| pgtable.h |
    2029  static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)  in gmap_pgste_get_pgt_addr() argument
    2033  pgstes = pgt + _PAGE_ENTRIES;  in gmap_pgste_get_pgt_addr()
|
| /linux/drivers/gpu/drm/panthor/ |
| panthor_mmu.c |
    837  const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);  in panthor_vm_page_size() local
    838  u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;  in panthor_vm_page_size()
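
panthor_vm_page_size() derives the VM's page size from the io-pgtable configuration: the lowest set bit of pgsize_bitmap is the smallest supported page size, so ffs() minus one gives its shift. A tiny standalone sketch of that derivation (min_page_size() is an illustrative helper, not part of the driver):

#include <stdint.h>
#include <strings.h>	/* ffs() */

static uint32_t min_page_size(uint32_t pgsize_bitmap)
{
	uint32_t pg_shift = ffs(pgsize_bitmap) - 1;

	return 1u << pg_shift;
}

/* Example: a bitmap advertising 4K | 2M | 1G (0x40201000) yields 4096. */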
|
| /linux/drivers/net/ethernet/broadcom/bnx2x/ |
| bnx2x_self_test.c |
    78   static int pgt(struct st_pred_args *args)  in pgt() function
    395  NA, 1, 0, pgt,
|