/linux/arch/powerpc/include/asm/book3s/64/

mmu-hash.h
    159   void (*hugepage_invalidate)(unsigned long vsid,
    421   unsigned long vsid, int ssize)  in hpt_vpn() argument
    427   return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);  in hpt_vpn()
    437   unsigned long hash, vsid;  in hpt_hash() local
    446   vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);  in hpt_hash()
    447   hash = vsid ^ (vsid << 25) ^  in hpt_hash()
    460   unsigned long vsid, pte_t *ptep, unsigned long trap,
    463   unsigned long vsid, pte_t *ptep, unsigned long trap,
    474   int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
    479   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
    [all …]
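
Note: the hpt_vpn()/hpt_hash() fragments above are where a VSID turns into a hash-table bucket. Below is a minimal user-space sketch of the 1T-segment path only, for a 64-bit host; VPN_SHIFT and SID_SHIFT_1T are hard-coded to assumed values rather than taken from the kernel headers, and main() uses made-up inputs.

#include <stdio.h>

/* Assumed values of the kernel constants used by hpt_vpn()/hpt_hash();
 * not taken from the kernel headers. */
#define VPN_SHIFT     12UL   /* VPN drops the low 12 bits of the EA */
#define SID_SHIFT_1T  40UL   /* 1TB segment size */

/* Sketch of hpt_vpn() for a 1T segment: VSID in the high bits, the
 * segment-relative part of the EA below it. */
static unsigned long vpn_1t(unsigned long ea, unsigned long vsid)
{
	unsigned long mask = (1UL << (SID_SHIFT_1T - VPN_SHIFT)) - 1;

	return (vsid << (SID_SHIFT_1T - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/* Sketch of the 1T branch of hpt_hash(): fold the VSID into itself, then
 * XOR in the page index within the segment (shift = base page shift). */
static unsigned long hash_1t(unsigned long vpn, unsigned int shift)
{
	unsigned long mask = (1UL << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
	unsigned long vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
	unsigned long hash = vsid ^ (vsid << 25) ^
			     ((vpn & mask) >> (shift - VPN_SHIFT));

	return hash & 0x7fffffffffUL;   /* hash value is 39 bits wide */
}

int main(void)
{
	unsigned long vpn = vpn_1t(0xc000000001234000UL, 0xabcdefUL);

	printf("vpn=%#lx hash=%#lx\n", vpn, hash_1t(vpn, 12));
	return 0;
}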
|
tlbflush-hash.h
    65    extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
|
/linux/arch/powerpc/kvm/

book3s_32_mmu.c
    73    u64 *vsid);
    83    u64 vsid;  in kvmppc_mmu_book3s_32_ea_to_vp() local
    89    kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_book3s_32_ea_to_vp()
    90    return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);  in kvmppc_mmu_book3s_32_ea_to_vp()
    156   u64 vsid;  in kvmppc_mmu_book3s_32_xlate_bat() local
    158   eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_book3s_32_xlate_bat()
    159   vsid <<= 16;  in kvmppc_mmu_book3s_32_xlate_bat()
    160   pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;  in kvmppc_mmu_book3s_32_xlate_bat()
    349   u64 *vsid)  in kvmppc_mmu_book3s_32_esid_to_vsid() argument
    367   *vsid = VSID_REAL | esid;  in kvmppc_mmu_book3s_32_esid_to_vsid()
    [all …]
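
Note: a hedged stand-alone sketch of the virtual-page computation visible in kvmppc_mmu_book3s_32_ea_to_vp(); SID_SHIFT is assumed to be 28 (256MB segments), and the real segment-register lookup is replaced by a hypothetical example_esid_to_vsid() helper for illustration only.

#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT 28   /* assumed: 256MB segments on 32-bit Book3S */

/* Hypothetical stand-in for kvmppc_mmu_book3s_32_esid_to_vsid(); the real
 * lookup walks the guest segment registers. */
static uint64_t example_esid_to_vsid(uint64_t esid)
{
	return esid | 0x100;   /* arbitrary mapping, purely illustrative */
}

/* Sketch of kvmppc_mmu_book3s_32_ea_to_vp(): 16-bit page index inside the
 * 256MB segment, with the VSID shifted in above it. */
static uint64_t ea_to_vp(uint32_t eaddr)
{
	uint64_t vsid = example_esid_to_vsid(eaddr >> SID_SHIFT);

	return (((uint64_t)eaddr >> 12) & 0xffff) | (vsid << 16);
}

int main(void)
{
	printf("vp=%#llx\n", (unsigned long long)ea_to_vp(0x30005000u));
	return 0;
}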
|
book3s_64_mmu.c
    51    if (vcpu->arch.slb[i].vsid)  in kvmppc_mmu_book3s_64_find_slbe()
    57    vcpu->arch.slb[i].vsid);  in kvmppc_mmu_book3s_64_find_slbe()
    78    ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));  in kvmppc_slb_calc_vpn()
    141   page, vcpu_book3s->sdr1, pteg, slbe->vsid);  in kvmppc_mmu_book3s_64_get_pteg()
    161   avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);  in kvmppc_mmu_book3s_64_get_avpn()
    383   slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);  in kvmppc_mmu_book3s_64_slbmte()
    573   u64 *vsid)  in kvmppc_mmu_book3s_64_esid_to_vsid() argument
    585   gvsid = slb->vsid;  in kvmppc_mmu_book3s_64_esid_to_vsid()
    631   *vsid = gvsid;  in kvmppc_mmu_book3s_64_esid_to_vsid()
    639   *vsid = VSID_REAL | esid;  in kvmppc_mmu_book3s_64_esid_to_vsid()
|
book3s_hv_rm_mmu.c
    1103  unsigned long vsid, hash;  in kvmppc_hv_find_lock_hpte() local
    1120  vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;  in kvmppc_hv_find_lock_hpte()
    1121  vsid ^= vsid << 25;  in kvmppc_hv_find_lock_hpte()
    1124  vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;  in kvmppc_hv_find_lock_hpte()
    1126  hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);  in kvmppc_hv_find_lock_hpte()
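
Note: kvmppc_hv_find_lock_hpte() recovers the VSID straight from the SLB V ("vsid") word rather than from a software context. A minimal sketch of that extraction follows; SLB_VSID_B, SLB_VSID_B_1T and the two shift constants are given assumed values matching their usual definitions, not taken from the headers.

#include <stdio.h>

/* Assumed values of the SLB VSID-word constants. */
#define SLB_VSID_B        0xc000000000000000UL   /* segment-size field */
#define SLB_VSID_B_1T     0x4000000000000000UL
#define SLB_VSID_SHIFT    12
#define SLB_VSID_SHIFT_1T 24

/* Sketch of the VSID extraction in kvmppc_hv_find_lock_hpte(): mask off the
 * segment-size bits, shift the VSID down, and for 1T segments fold it the
 * same way hpt_hash() does before indexing the hash table. */
static unsigned long slb_v_to_hash_vsid(unsigned long slb_v)
{
	unsigned long vsid;

	if (slb_v & SLB_VSID_B_1T) {
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	return vsid;
}

int main(void)
{
	printf("%#lx\n", slb_v_to_hash_vsid(0x4000001234567000UL));
	return 0;
}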
|
book3s_hv_ras.c
    58    unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);  in reload_slb()
|
/linux/drivers/misc/lkdtm/

powerpc.c
    73    unsigned long esid, vsid;  in insert_dup_slb_entry_0() local
    80    asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));  in insert_dup_slb_entry_0()
    84    : "r" (vsid),  in insert_dup_slb_entry_0()
    89    asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));  in insert_dup_slb_entry_0()
    93    : "r" (vsid),  in insert_dup_slb_entry_0()
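
Note: the lkdtm test reads SLB entries with slbmfev (and, presumably, a matching slbmfee for the ESID half) before re-inserting a duplicate entry. A small kernel-context sketch of that read pattern; powerpc64 Book3S only, privileged state, and it cannot run in user space.

/* Read SLB entry i: ESID half via slbmfee, VSID half via slbmfev.
 * Only meaningful in hypervisor/kernel context on 64-bit Book3S. */
static inline void read_slb_entry(int i, unsigned long *esid, unsigned long *vsid)
{
	asm volatile("slbmfee %0,%1" : "=r" (*esid) : "r" (i));
	asm volatile("slbmfev %0,%1" : "=r" (*vsid) : "r" (i));
}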
|
/linux/arch/powerpc/mm/book3s64/

hash_utils.c
    282   unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);  in kernel_map_linear_page() local
    283   unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);  in kernel_map_linear_page()
    290   if (!vsid)  in kernel_map_linear_page()
    311   unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);  in kernel_unmap_linear_page() local
    312   unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);  in kernel_unmap_linear_page()
    644   unsigned long vsid = get_kernel_vsid(vaddr, ssize);  in htab_bolt_mapping() local
    645   unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);  in htab_bolt_mapping()
    652   if (!vsid)  in htab_bolt_mapping()
    1686  unsigned long vsid, unsigned long trap,  in hash_failure_debug() argument
    1694  trap, vsid, ssize, psize, lpsize, pte);  in hash_failure_debug()
    [all …]
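
Note: the same three-step idiom recurs throughout this file (and in the ptdump walkers further down): derive the kernel VSID for the address, bail out when it is zero, then fold address and VSID into a VPN. A kernel-context sketch of that idiom, using only the helpers visible in the fragments above; it is not a complete function from the file.

/* Sketch of the recurring pattern in kernel_map_linear_page(),
 * htab_bolt_mapping(), native_find(), etc. */
static int example_kernel_ea_to_vpn(unsigned long vaddr, int ssize,
				    unsigned long *vpn)
{
	unsigned long vsid = get_kernel_vsid(vaddr, ssize);

	if (!vsid)	/* address not covered by a kernel segment */
		return -1;

	*vpn = hpt_vpn(vaddr, vsid, ssize);
	return 0;
}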
|
hash_tlb.c
    45    unsigned long vsid;  in hpte_need_flush() local
    90    vsid = get_user_vsid(&mm->context, addr, ssize);  in hpte_need_flush()
    92    vsid = get_kernel_vsid(addr, mmu_kernel_ssize);  in hpte_need_flush()
    95    WARN_ON(vsid == 0);  in hpte_need_flush()
    96    vpn = hpt_vpn(addr, vsid, ssize);  in hpte_need_flush()
|
hash_64k.c
    38    int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,  in __hash_page_4K() argument
    90    vpn = hpt_vpn(ea, vsid, ssize);  in __hash_page_4K()
    213   hash_failure_debug(ea, access, vsid, trap, ssize,  in __hash_page_4K()
    229   unsigned long vsid, pte_t *ptep, unsigned long trap,  in __hash_page_64K() argument
    275   vpn = hpt_vpn(ea, vsid, ssize);  in __hash_page_64K()
    328   hash_failure_debug(ea, access, vsid, trap, ssize,  in __hash_page_64K()
|
slb.c
    87    WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));  in slb_shadow_update()
    126   : "r" (be64_to_cpu(p->save_area[index].vsid)),  in __slb_restore_bolted_realmode()
    173   ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);  in __slb_flush_and_restore_bolted()
    226   slb_ptr->vsid = v;  in slb_save_contents()
    244   v = slb_ptr->vsid;  in slb_dump_contents()
    599   unsigned long vsid;  in slb_insert_entry() local
    603   vsid = get_vsid(context, ea, ssize);  in slb_insert_entry()
    604   if (!vsid)  in slb_insert_entry()
    620   vsid_data = __mk_vsid_data(vsid, ssize, flags);  in slb_insert_entry()
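
Note: the slb.c fragments build and consume the SLB "vsid" doubleword. Below is a hedged user-space sketch of roughly what mk_vsid_data()/__mk_vsid_data() assemble: VSID in the upper field, segment size in the top two bits, protection/page-size flags below. The constants and the 0x490 flags value are assumptions for illustration, not the kernel definitions.

#include <stdio.h>

/* Assumed values of the SLB constants used when assembling the VSID word. */
#define SLB_VSID_SHIFT        12
#define SLB_VSID_SHIFT_1T     24
#define SLB_VSID_SSIZE_SHIFT  62
#define MMU_SEGSIZE_256M      0
#define MMU_SEGSIZE_1T        1

/* Sketch of the SLB "vsid" doubleword layout. */
static unsigned long example_mk_vsid_data(unsigned long vsid, int ssize,
					  unsigned long flags)
{
	int shift = (ssize == MMU_SEGSIZE_256M) ? SLB_VSID_SHIFT
						: SLB_VSID_SHIFT_1T;

	return (vsid << shift) |
	       ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT) | flags;
}

int main(void)
{
	/* 0x490 is a made-up flags value, purely for illustration */
	printf("%#lx\n", example_mk_vsid_data(0x123456UL, MMU_SEGSIZE_1T, 0x490UL));
	return 0;
}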
|
hash_4k.c
    21    int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,  in __hash_page_4K() argument
    66    vpn = hpt_vpn(ea, vsid, ssize);  in __hash_page_4K()
    117   hash_failure_debug(ea, access, vsid, trap, ssize,  in __hash_page_4K()
|
/linux/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3-iommufd.c
    105   unsigned long vsid;  in arm_smmu_attach_prepare_vmaster() local
    111   state->master->dev, &vsid);  in arm_smmu_attach_prepare_vmaster()
    128   vmaster->vsid = vsid;  in arm_smmu_attach_prepare_vmaster()
    276   static int arm_vsmmu_vsid_to_sid(struct arm_vsmmu *vsmmu, u32 vsid, u32 *sid)  in arm_vsmmu_vsid_to_sid() argument
    283   dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);  in arm_vsmmu_vsid_to_sid()
    337   u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);  in arm_vsmmu_convert_user_cmd() local
    339   if (arm_vsmmu_vsid_to_sid(vsmmu, vsid, &sid))  in arm_vsmmu_convert_user_cmd()
    476   FIELD_PREP(EVTQ_0_SID, vmaster->vsid));  in arm_vmaster_report_event()
|
tegra241-cmdqv.c
    1216  struct tegra241_vintf_sid *vsid = vdev_to_vsid(vdev);  in tegra241_vintf_destroy_vsid() local
    1217  struct tegra241_vintf *vintf = vsid->vintf;  in tegra241_vintf_destroy_vsid()
    1219  writel(0, REG_VINTF(vintf, SID_MATCH(vsid->idx)));  in tegra241_vintf_destroy_vsid()
    1220  writel(0, REG_VINTF(vintf, SID_REPLACE(vsid->idx)));  in tegra241_vintf_destroy_vsid()
    1221  ida_free(&vintf->sids, vsid->idx);  in tegra241_vintf_destroy_vsid()
    1224  vsid->idx, vsid->sid);  in tegra241_vintf_destroy_vsid()
    1232  struct tegra241_vintf_sid *vsid = vdev_to_vsid(vdev);  in tegra241_vintf_init_vsid() local
    1254  vsid->idx = sidx;  in tegra241_vintf_init_vsid()
    1255  vsid->vintf = vintf;  in tegra241_vintf_init_vsid()
    1256  vsid->sid = stream->id;  in tegra241_vintf_init_vsid()
|
/linux/arch/microblaze/include/asm/

mmu.h
    20    unsigned long vsid:24; /* Virtual segment identifier */  member
    48    unsigned long vsid:24; /* Virtual Segment Identifier */  member
|
/linux/arch/powerpc/mm/ptdump/

hashpagetable.c
    210   unsigned long hash, vsid, vpn, hpte_group, want_v, hpte_v;  in native_find() local
    215   vsid = get_kernel_vsid(ea, ssize);  in native_find()
    216   vpn = hpt_vpn(ea, vsid, ssize);  in native_find()
    251   unsigned long vsid, vpn, hash, hpte_group, want_v;  in pseries_find() local
    257   vsid = get_kernel_vsid(ea, ssize);  in pseries_find()
    258   vpn = hpt_vpn(ea, vsid, ssize);  in pseries_find()
|
/linux/arch/powerpc/include/asm/

lppaca.h
    142   __be64 vsid;  member
|
kvm_book3s_asm.h
    147   u64 vsid;  member
|
/linux/arch/powerpc/platforms/cell/

spu_base.c
    138   __func__, slbe, slb->vsid, slb->esid);  in spu_load_slb()
    144   out_be64(&priv2->slb_vsid_RW, slb->vsid);  in spu_load_slb()
    217   slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |  in __spu_kernel_slb()
|
/linux/arch/powerpc/include/asm/book3s/32/

mmu-hash.h
    161   unsigned long vsid:24; /* Virtual segment identifier */  member
|
/linux/arch/powerpc/kernel/

asm-offsets.c
    226   OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);  in main()
|
/linux/Documentation/virt/kvm/

api.rst
    3159  be OR'ed into the "vsid" argument of the slbmte instruction.
|