/linux/arch/arm64/mm/context.c
    38  #define ctxid2asid(asid)        ((asid) & ~ASID_MASK)      [argument]
    39  #define asid2ctxid(asid, genid) ((asid) | (genid))         [argument]
    44  u32 asid;                                      in get_cpu_asid_bits() [local]
    54  asid = 8;                                      in get_cpu_asid_bits()
    57  asid = 16;                                     in get_cpu_asid_bits()
    60  return asid;                                   in get_cpu_asid_bits()
    66  u32 asid = get_cpu_asid_bits();                in verify_cpu_asid_bits() [local]
    68  if (asid < asid_bits) {                        in verify_cpu_asid_bits()
    74  smp_processor_id(), asid, asid_bits);          in verify_cpu_asid_bits()
   101  #define asid_gen_match(asid) \                 [argument]
        [all …]

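The ctxid2asid()/asid2ctxid() macros at lines 38-39 carry the core idea of the arm64 allocator: one 64-bit context id whose low bits are the hardware ASID and whose high bits are a rollover generation. A minimal user-space sketch of that split, assuming 16 ASID bits (the real width is probed per CPU by get_cpu_asid_bits(), which returns 8 or 16):

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS 16                    /* assumed; probed at boot on real hardware */
    #define ASID_MASK (~0ULL << ASID_BITS)

    static inline uint64_t ctxid2asid(uint64_t ctxid)
    {
        return ctxid & ~ASID_MASK;          /* low bits: hardware ASID */
    }

    static inline uint64_t asid2ctxid(uint64_t asid, uint64_t genid)
    {
        return asid | genid;                /* high bits: rollover generation */
    }

    int main(void)
    {
        uint64_t gen = 3ULL << ASID_BITS;   /* third rollover generation */
        uint64_t ctx = asid2ctxid(42, gen); /* ASID 42 tagged with that generation */

        printf("ctxid=%#llx asid=%llu gen=%llu\n",
               (unsigned long long)ctx,
               (unsigned long long)ctxid2asid(ctx),
               (unsigned long long)(ctx >> ASID_BITS));
        return 0;
    }

Because the generation rides in the same word, "is this mm's ASID still valid?" is a single compare of the high bits, with no per-mm flag to maintain.
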
/linux/arch/arm/mm/context.c
    56  u64 context_id, asid;                               in a15_erratum_get_cpumask() [local]
    67  asid = per_cpu(active_asids, cpu).counter;          in a15_erratum_get_cpumask()
    68  if (asid == 0)                                      in a15_erratum_get_cpumask()
    69  asid = per_cpu(reserved_asids, cpu);                in a15_erratum_get_cpumask()
    70  if (context_id == asid)                             in a15_erratum_get_cpumask()
   139  u64 asid;                                           in flush_context() [local]
   144  asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
   152  if (asid == 0)                                      in flush_context()
   153  asid = per_cpu(reserved_asids, i);                  in flush_context()
   154  __set_bit(asid & ~ASID_MASK, asid_map);             in flush_context()
        [all …]

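flush_context() above is the generation-rollover step of the arm allocator. A compilable sketch of its shape, with invented sizes (4 CPUs, 8 ASID bits) and C11 atomics standing in for the kernel's atomic64_xchg() and bitmap helpers:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <string.h>

    #define NR_CPUS   4                     /* invented for the sketch */
    #define ASID_BITS 8
    #define ASID_MASK (~0ULL << ASID_BITS)

    static _Atomic uint64_t active_asids[NR_CPUS];
    static uint64_t reserved_asids[NR_CPUS];
    static uint64_t asid_map[(1 << ASID_BITS) / 64];

    static void set_asid_bit(uint64_t n)
    {
        asid_map[n / 64] |= 1ULL << (n % 64);
    }

    /* Rollover: atomically steal each CPU's running ASID (a concurrent
     * switch_mm() then reads 0 and takes the slow path), keep it as
     * reserved so that CPU can continue untouched, and pre-mark it in
     * the bitmap so the new generation never hands it to another mm. */
    static void flush_context(void)
    {
        memset(asid_map, 0, sizeof(asid_map));
        for (int i = 0; i < NR_CPUS; i++) {
            uint64_t asid = atomic_exchange(&active_asids[i], 0);
            if (asid == 0)                  /* CPU already saw a rollover */
                asid = reserved_asids[i];
            set_asid_bit(asid & ~ASID_MASK);
            reserved_asids[i] = asid;
        }
    }

    int main(void)
    {
        atomic_store(&active_asids[0], 0x105);
        flush_context();
        return reserved_asids[0] == 0x105 ? 0 : 1;
    }
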
/linux/arch/csky/mm/asid.c
    21  #define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)   [argument]
    27  u64 asid;                                           in flush_context() [local]
    33  asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);   in flush_context()
    41  if (asid == 0)                                      in flush_context()
    42  asid = reserved_asid(info, i);                      in flush_context()
    43  __set_bit(asid2idx(info, asid), info->map);         in flush_context()
    44  reserved_asid(info, i) = asid;                      in flush_context()
    54  static bool check_update_reserved_asid(struct asid_info *info, u64 asid,   in check_update_reserved_asid() [argument]
    70  if (reserved_asid(info, cpu) == asid) {             in check_update_reserved_asid()
    83  u64 asid = atomic64_read(pasid);                    in new_context() [local]
        [all …]

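check_update_reserved_asid() covers tasks whose ASID was parked as a reserved value during rollover: such a task may keep its hardware ASID, but every CPU reserving that value must be retagged with the new generation's context id so later rollovers stay consistent. A sketch of that shape (NR_CPUS and the plain array are stand-ins for the kernel's per-CPU data):

    #include <stdbool.h>
    #include <stdint.h>

    #define NR_CPUS 4                       /* invented for the sketch */

    static uint64_t reserved_asids[NR_CPUS];

    static bool check_update_reserved_asid(uint64_t asid, uint64_t newasid)
    {
        bool hit = false;

        /* An ASID may be reserved by several CPUs; update them all so
         * each holds the current-generation value for the same ASID. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (reserved_asids[cpu] == asid) {
                hit = true;
                reserved_asids[cpu] = newasid;
            }
        }
        return hit;
    }

    int main(void)
    {
        reserved_asids[2] = 0x105;
        return check_update_reserved_asid(0x105, 0x205) ? 0 : 1;
    }
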
/linux/arch/xtensa/include/asm/mmu_context.h
    72  unsigned long asid = cpu_asid_cache(cpu);           in get_new_mmu_context() [local]
    73  if ((++asid & ASID_MASK) == 0) {                    in get_new_mmu_context()
    79  asid += ASID_USER_FIRST;                            in get_new_mmu_context()
    81  cpu_asid_cache(cpu) = asid;                         in get_new_mmu_context()
    82  mm->context.asid[cpu] = asid;                       in get_new_mmu_context()
    93  unsigned long asid = mm->context.asid[cpu];         in get_mmu_context() [local]
    95  if (asid == NO_CONTEXT ||                           in get_mmu_context()
    96  ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))        in get_mmu_context()
   104  set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));   in activate_context()
   120  mm->context.asid[cpu] = NO_CONTEXT;                 in init_new_context()

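xtensa keeps one monotonically increasing per-CPU cache whose low byte is the hardware ASID and whose upper bits act as a generation; get_mmu_context() at line 96 compares those upper bits to detect a stale mm. A sketch of get_new_mmu_context()'s wrap handling, with the TLB flush stubbed out:

    #include <stdio.h>

    #define ASID_MASK       0xfful
    #define ASID_USER_FIRST 4ul             /* 0..3 are reserved values */

    static unsigned long cpu_asid_cache = ASID_USER_FIRST; /* per-CPU in the kernel */

    static void local_flush_tlb_all(void) { /* stub for the sketch */ }

    /* The cache only ever increments, so its high bits form a generation.
     * When the low byte wraps, every ASID has been handed out since the
     * last flush: flush the whole TLB and skip the reserved low values. */
    static unsigned long get_new_asid(void)
    {
        unsigned long asid = cpu_asid_cache;

        if ((++asid & ASID_MASK) == 0) {
            local_flush_tlb_all();
            asid += ASID_USER_FIRST;
        }
        cpu_asid_cache = asid;
        return asid;
    }

    int main(void)
    {
        printf("%lu\n", get_new_asid());
        return 0;
    }
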
/linux/arch/sh/mm/tlbflush_32.c
    21  unsigned long asid;                                 in local_flush_tlb_page() [local]
    24  asid = cpu_asid(cpu, vma->vm_mm);                   in local_flush_tlb_page()
    30  set_asid(asid);                                     in local_flush_tlb_page()
    32  local_flush_tlb_one(asid, page);                    in local_flush_tlb_page()
    56  unsigned long asid;                                 in local_flush_tlb_range() [local]
    59  asid = cpu_asid(cpu, mm);                           in local_flush_tlb_range()
    65  set_asid(asid);                                     in local_flush_tlb_range()
    68  local_flush_tlb_one(asid, start);                   in local_flush_tlb_range()
    89  unsigned long asid;                                 in local_flush_tlb_kernel_range() [local]
    92  asid = cpu_asid(cpu, &init_mm);                     in local_flush_tlb_kernel_range()
        [all …]

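The SH flush routines share one pattern: because TLB lookups are tagged with the current ASID, flushing a page of another mm means temporarily borrowing that mm's ASID around the single-entry flush. A sketch with stubbed primitives (the real local_flush_tlb_page() saves and restores the ASID the same way when vma->vm_mm is not current):

    /* Stubs standing in for the SH MMU primitives. */
    static unsigned long current_asid;
    static unsigned long get_asid(void) { return current_asid; }
    static void set_asid(unsigned long asid) { current_asid = asid; }
    static void local_flush_tlb_one(unsigned long asid, unsigned long page)
    {
        (void)asid; (void)page;         /* would write the MMU flush registers */
    }

    /* A single-entry flush only hits entries tagged with the ASID that
     * is current at flush time, so borrow the victim mm's ASID and
     * restore our own afterwards. */
    static void flush_page_of_mm(unsigned long mm_asid, unsigned long page)
    {
        unsigned long saved_asid = get_asid();

        set_asid(mm_asid);
        local_flush_tlb_one(mm_asid, page);
        set_asid(saved_asid);
    }

    int main(void)
    {
        flush_page_of_mm(5, 0x400000);
        return 0;
    }
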
/linux/arch/sh/include/asm/mmu_context_32.h
     6  static inline void set_asid(unsigned long asid)     in set_asid() [argument]
     8  __raw_writel(asid, MMU_PTEAEX);                     in set_asid()
    16  static inline void set_asid(unsigned long asid)     in set_asid() [argument]
    25  : "r" (asid), "m" (__m(MMU_PTEH)),                  in set_asid()
    31  unsigned long asid;                                 in get_asid() [local]
    34  : "=r" (asid)                                       in get_asid()
    36  asid &= MMU_CONTEXT_ASID_MASK;                      in get_asid()
    37  return asid;                                        in get_asid()

/linux/arch/sh/include/asm/mmu_context.h
    57  unsigned long asid = asid_cache(cpu);               in get_mmu_context() [local]
    60  if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)   in get_mmu_context()
    65  if (!(++asid & MMU_CONTEXT_ASID_MASK)) {            in get_mmu_context()
    76  if (!asid)                                          in get_mmu_context()
    77  asid = MMU_CONTEXT_FIRST_VERSION;                   in get_mmu_context()
    80  cpu_context(cpu, mm) = asid_cache(cpu) = asid;      in get_mmu_context()
   128  #define set_asid(asid) do { } while (0)             [argument]
   131  #define switch_and_save_asid(asid) (0)               [argument]

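Here the version lives in the same unsigned long as the ASID: MMU_CONTEXT_VERSION_MASK selects the high bits, and a mismatch against asid_cache(cpu) forces a new allocation. A sketch of get_mmu_context()'s counter handling (the field widths are illustrative and the TLB flush is stubbed):

    #include <stdio.h>

    #define MMU_CONTEXT_ASID_MASK     0x000000fful
    #define MMU_CONTEXT_VERSION_MASK  0xffffff00ul
    #define MMU_CONTEXT_FIRST_VERSION 0x00000100ul

    static unsigned long asid_cache = MMU_CONTEXT_FIRST_VERSION;

    static void local_flush_tlb_all(void) { /* stub for the sketch */ }

    /* One counter per CPU: low byte = hardware ASID, high bits = version.
     * An mm is still current iff its saved context shares the CPU's
     * version; otherwise it takes the next counter value, flushing the
     * TLB when the ASID byte wraps into a new version. */
    static unsigned long get_mmu_context(unsigned long cpu_context)
    {
        unsigned long asid = asid_cache;

        if (((cpu_context ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
            return cpu_context;             /* still valid on this CPU */

        if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
            local_flush_tlb_all();
            if (!asid)                      /* the whole counter wrapped */
                asid = MMU_CONTEXT_FIRST_VERSION;
        }
        return asid_cache = asid;
    }

    int main(void)
    {
        printf("%#lx\n", get_mmu_context(0));
        return 0;
    }
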
/linux/arch/sh/include/asm/tlbflush.h
    23  extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
    35  extern void flush_tlb_one(unsigned long asid, unsigned long page);
    42  #define flush_tlb_one(asid, page) local_flush_tlb_one(asid, page)   [argument]

/linux/arch/loongarch/include/asm/mmu_context.h
    34  #define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
    54  u64 asid = asid_cache(cpu);                         in get_new_mmu_context() [local]
    56  if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))    in get_new_mmu_context()
    59  cpu_context(cpu, mm) = asid_cache(cpu) = asid;      in get_new_mmu_context()
    77  static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl)   in atomic_update_pgd_asid() [argument]
    82  : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl)    in atomic_update_pgd_asid()
   143  int asid;                                           in drop_mmu_context() [local]
   148  asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);   in drop_mmu_context()
   150  if (asid == cpu_asid(cpu, mm)) {                    in drop_mmu_context()

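LoongArch, like MIPS further down, increments a per-CPU 64-bit cache: the upper bits work as an implicit version, and a carry out of the ASID field signals that the whole numbering space has been consumed. A sketch with an invented mask value and a stubbed flush (on real parts cpu_asid_mask() comes from the CPU data):

    #include <stdint.h>
    #include <stdio.h>

    #define CPU_ASID_MASK 0x3ffull          /* invented; hardware-dependent */

    static uint64_t asid_cache = CPU_ASID_MASK + 1;   /* high bits = version */

    static void local_flush_tlb_user(void) { /* stub for the sketch */ }

    /* The increment clears the ASID field exactly when it carries into
     * the version bits; only then must the TLB be flushed before the
     * low-bit space is reused. */
    static uint64_t get_new_mmu_context(void)
    {
        uint64_t asid = asid_cache;

        if (!(++asid & CPU_ASID_MASK))
            local_flush_tlb_user();

        return asid_cache = asid;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)get_new_mmu_context());
        return 0;
    }
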
/linux/drivers/misc/sgi-gru/grumain.c
    89  static int gru_reset_asid_limit(struct gru_state *gru, int asid)   in gru_reset_asid_limit() [argument]
    93  gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);   in gru_reset_asid_limit()
    96  if (asid >= limit)                                  in gru_reset_asid_limit()
    97  asid = gru_wrap_asid(gru);                          in gru_reset_asid_limit()
   108  if (inuse_asid == asid) {                           in gru_reset_asid_limit()
   109  asid += ASID_INC;                                   in gru_reset_asid_limit()
   110  if (asid >= limit) {                                in gru_reset_asid_limit()
   116  if (asid >= MAX_ASID)                               in gru_reset_asid_limit()
   117  asid = gru_wrap_asid(gru);                          in gru_reset_asid_limit()
   122  if ((inuse_asid > asid) && (inuse_asid < limit))    in gru_reset_asid_limit()
        [all …]

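gru_reset_asid_limit() hands out GRU ASIDs from a moving window: it steps the candidate past every value still in use and records the lowest in-use value above it as the window limit, so allocation can proceed without rescanning until the limit is hit. A sketch of that scan over a plain array (the driver walks its context structures instead; constants are invented):

    #include <stdio.h>

    #define MIN_ASID 8
    #define MAX_ASID 0xfffff0
    #define ASID_INC 8

    static int reset_asid_limit(int asid, const int *inuse, int n, int *limit_out)
    {
        int limit = MAX_ASID;
        int i;

    again:
        for (i = 0; i < n; i++) {
            if (inuse[i] == asid) {
                asid += ASID_INC;           /* collision: step past it */
                if (asid >= limit) {        /* window exhausted: wrap */
                    limit = MAX_ASID;
                    if (asid >= MAX_ASID)
                        asid = MIN_ASID;
                    goto again;
                }
            }
            if (inuse[i] > asid && inuse[i] < limit)
                limit = inuse[i];           /* tightest bound above asid */
        }
        *limit_out = limit;
        return asid;
    }

    int main(void)
    {
        int inuse[] = { 8, 16, 64 };
        int limit, asid = reset_asid_limit(8, inuse, 3, &limit);

        printf("asid=%d limit=%d\n", asid, limit);  /* asid=24 limit=64 */
        return 0;
    }
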
/linux/drivers/misc/sgi-gru/grutlbpurge.c
   150  int grupagesize, pagesize, pageshift, gid, asid;    in gru_flush_tlb_range() [local]
   167  asid = asids->mt_asid;                              in gru_flush_tlb_range()
   168  if (asids->mt_ctxbitmap && asid) {                  in gru_flush_tlb_range()
   170  asid = GRUASID(asid, start);                        in gru_flush_tlb_range()
   173  gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);   in gru_flush_tlb_range()
   175  tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,   in gru_flush_tlb_range()
   184  gid, asid, asids->mt_ctxbitmap,                     in gru_flush_tlb_range()

/linux/drivers/misc/sgi-gru/gruhandles.c
   135  int asid, int pagesize, int global, int n,          in tgh_invalidate() [argument]
   139  tgh->asid = asid;                                   in tgh_invalidate()
   152  unsigned long vaddr, int asid, int dirty,           in tfh_write_only() [argument]
   155  tfh->fillasid = asid;                               in tfh_write_only()
   168  unsigned long vaddr, int asid, int dirty,           in tfh_write_restart() [argument]
   171  tfh->fillasid = asid;                               in tfh_write_restart()

/linux/drivers/vhost/vdpa.c
    71  u64 last, u32 asid);
    80  static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)   in asid_to_as() [argument]
    82  struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];   in asid_to_as()
    86  if (as->id == asid)                                 in asid_to_as()
    92  static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)   in asid_to_iotlb() [argument]
    94  struct vhost_vdpa_as *as = asid_to_as(v, asid);     in asid_to_iotlb()
   102  static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)   in vhost_vdpa_alloc_as() [argument]
   104  struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];   in vhost_vdpa_alloc_as()
   107  if (asid_to_as(v, asid))                            in vhost_vdpa_alloc_as()
   110  if (asid >= v->vdpa->nas)                           in vhost_vdpa_alloc_as()
        [all …]

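vhost-vdpa maps a u32 address-space id to its struct vhost_vdpa_as through a small hash table; asid % VHOST_VDPA_IOTLB_BUCKETS picks the bucket and the chain is walked for an exact match. A user-space sketch of the lookup and the duplicate-refusing allocation, with a singly linked list standing in for the kernel hlist:

    #include <stdint.h>
    #include <stdlib.h>

    #define IOTLB_BUCKETS 16                /* plays the role of VHOST_VDPA_IOTLB_BUCKETS */

    struct vdpa_as {
        uint32_t id;
        struct vdpa_as *next;               /* stand-in for the kernel hlist */
    };

    static struct vdpa_as *as_buckets[IOTLB_BUCKETS];

    /* NULL means the ASID has no address space attached yet. */
    static struct vdpa_as *asid_to_as(uint32_t asid)
    {
        struct vdpa_as *as;

        for (as = as_buckets[asid % IOTLB_BUCKETS]; as; as = as->next)
            if (as->id == asid)
                return as;
        return NULL;
    }

    /* Refuse duplicates, then insert at the bucket head. */
    static struct vdpa_as *alloc_as(uint32_t asid)
    {
        struct vdpa_as *as;

        if (asid_to_as(asid))
            return NULL;
        as = calloc(1, sizeof(*as));
        if (!as)
            return NULL;
        as->id = asid;
        as->next = as_buckets[asid % IOTLB_BUCKETS];
        as_buckets[asid % IOTLB_BUCKETS] = as;
        return as;
    }

    int main(void)
    {
        return alloc_as(3) && asid_to_as(3) ? 0 : 1;
    }
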
/linux/drivers/accel/habanalabs/common/asid.c
    50  void hl_asid_free(struct hl_device *hdev, unsigned long asid)   in hl_asid_free() [argument]
    52  if (asid == HL_KERNEL_ASID_ID || asid >= hdev->asic_prop.max_asid) {   in hl_asid_free()
    53  dev_crit(hdev->dev, "Invalid ASID %lu", asid);      in hl_asid_free()
    57  clear_bit(asid, hdev->asid_bitmap);                 in hl_asid_free()

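hl_asid_alloc()/hl_asid_free() form a plain bitmap allocator in which ASID 0 is permanently owned by the driver, so 0 can double as the allocation-failure return. A sketch with an invented MAX_ASID (the real bound comes from hdev->asic_prop.max_asid):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ASID       1024             /* invented for the sketch */
    #define KERNEL_ASID_ID 0                /* ASID 0 belongs to the driver */

    static uint64_t asid_bitmap[MAX_ASID / 64] = { 1 };   /* bit 0 pre-set */

    /* Find and claim the first clear bit above 0; returning 0 means
     * no ASID is available, which works because 0 is never handed out. */
    static unsigned long asid_alloc(void)
    {
        for (unsigned long n = 1; n < MAX_ASID; n++) {
            if (!(asid_bitmap[n / 64] & (1ULL << (n % 64)))) {
                asid_bitmap[n / 64] |= 1ULL << (n % 64);
                return n;
            }
        }
        return 0;
    }

    static void asid_free(unsigned long asid)
    {
        if (asid == KERNEL_ASID_ID || asid >= MAX_ASID) {
            fprintf(stderr, "Invalid ASID %lu\n", asid);
            return;
        }
        asid_bitmap[asid / 64] &= ~(1ULL << (asid % 64));
    }

    int main(void)
    {
        unsigned long a = asid_alloc();     /* 1 */
        asid_free(a);
        return 0;
    }
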
/linux/drivers/accel/habanalabs/common/context.c
   104  if (ctx->asid != HL_KERNEL_ASID_ID) {               in hl_ctx_fini()
   105  dev_dbg(hdev->dev, "closing user context, asid=%u\n", ctx->asid);   in hl_ctx_fini()
   120  hl_asid_free(hdev, ctx->asid);                      in hl_ctx_fini()
   228  ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */   in hl_ctx_init()
   242  ctx->asid = hl_asid_alloc(hdev);                    in hl_ctx_init()
   243  if (!ctx->asid) {                                   in hl_ctx_init()
   273  dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",   in hl_ctx_init()
   274  current->comm, ctx->asid);                          in hl_ctx_init()
   284  if (ctx->asid !…                                    in hl_ctx_init()
        [all …]

/linux/drivers/accel/habanalabs/gaudi/gaudi.c
   479  static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
   492  static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
  5847  static int gaudi_context_switch(struct hl_device *hdev, u32 asid)   in gaudi_context_switch() [argument]
  6044  void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)   in gaudi_mmu_prepare_reg() [argument]
  6048  WREG32_OR(reg, asid);                               in gaudi_mmu_prepare_reg()
  6051  static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)   in gaudi_mmu_prepare() [argument]
  6058  if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) {   in gaudi_mmu_prepare()
  6059  dev_crit(hdev->dev, "asid %u is too big\n", asid);  in gaudi_mmu_prepare()
  6063  gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_0, asid);   in gaudi_mmu_prepare()
  6064  gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_1, asid);   in gaudi_mmu_prepare()
        [all …]

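gaudi_mmu_prepare() programs the same ASID into a long list of per-engine registers after checking that it fits the hardware field; gaudi_mmu_prepare_reg() is a read-modify-write OR (WREG32_OR in the driver). A sketch against a fake register array, with an invented field mask:

    #include <stdint.h>

    #define ASID_FIELD_MASK 0x3ffu          /* invented register field width */

    /* WREG32_OR equivalent: OR the ASID into the register's low field. */
    static void mmu_prepare_reg(volatile uint32_t *reg, uint32_t asid)
    {
        *reg |= asid;
    }

    /* Reject ASIDs wider than the field, then program every engine
     * register with the same value. */
    static int mmu_prepare(volatile uint32_t *regs, int nregs, uint32_t asid)
    {
        if (asid & ~ASID_FIELD_MASK)
            return -1;                      /* asid too big for the field */

        for (int i = 0; i < nregs; i++)
            mmu_prepare_reg(&regs[i], asid);
        return 0;
    }

    int main(void)
    {
        uint32_t fake_regs[4] = { 0 };

        return mmu_prepare(fake_regs, 4, 7);
    }
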
/linux/drivers/iommu/arm/arm-smmu/qcom_iommu.c
    62  u8 asid;        /* asid and ctx bank # are 1:1 */   [member]
    82  static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)   in to_ctx() [argument]
    87  return qcom_iommu->ctxs[asid];                      in to_ctx()
   141  iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);   in qcom_iommu_tlb_inv_context()
   161  iova |= ctx->asid;                                  in qcom_iommu_tlb_inv_range_nosync()
   207  fsr, iova, fsynr, ctx->asid);                       in qcom_iommu_fault()
   258  ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);   in qcom_iommu_init_domain()
   282  FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));        in qcom_iommu_init_domain()
   554  unsigned asid = args->args[0];                      in qcom_iommu_of_xlate() [local]
   572  if (WARN_ON(asid > qcom_iommu->max_asid) ||         in qcom_iommu_of_xlate()
        [all …]

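In qcom_iommu_tlb_inv_range_nosync() the ASID rides in the low bits of the IOVA written to the invalidate-by-VA register (the iova |= ctx->asid at line 161 works because the IOVA is page-aligned, leaving the low bits free). A sketch with an invented granule and a plain variable standing in for the MMIO register:

    #include <stdint.h>

    #define GRANULE 4096u                   /* invented invalidation granule */

    static uint64_t last_cmd;               /* stands in for the TLBIVA MMIO register */
    static void mmio_write64(uint64_t v) { last_cmd = v; }

    /* Walk the range granule by granule; each command word carries the
     * page-aligned IOVA with the context's ASID packed into the low bits. */
    static void tlb_inv_range(uint64_t start, uint64_t size, uint8_t asid)
    {
        uint64_t iova = start & ~(uint64_t)(GRANULE - 1);

        for (; iova < start + size; iova += GRANULE)
            mmio_write64(iova | asid);
    }

    int main(void)
    {
        tlb_inv_range(0x10000, 2 * GRANULE, 5);
        return last_cmd == (0x11000u | 5) ? 0 : 1;
    }
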
/linux/arch/mips/lib/r3k_dump_tlb.c
    27  unsigned int asid;                                  in dump_tlb() [local]
    31  asid = read_c0_entryhi() & asid_mask;               in dump_tlb()
    46  (entryhi & asid_mask) == asid)) {                   in dump_tlb()
    65  write_c0_entryhi(asid);                             in dump_tlb()

/linux/drivers/gpu/drm/xe/xe_gt_pagefault.c
    26  u32 asid;                                           [member]
    53  u32 asid;                                           [member]
   187  static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)   in asid_to_vm() [argument]
   192  vm = xa_load(&xe->usm.asid_to_vm, asid);            in asid_to_vm()
   214  vm = asid_to_vm(xe, pf->asid);                      in handle_pagefault()
   268  pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),   in print_pagefault()
   293  pf->asid = FIELD_GET(PFD_ASID, desc->dw1);          in get_pagefault()
   326  u32 asid;                                           in xe_guc_pagefault_handler() [local]
   332  asid = FIELD_GET(PFD_ASID, msg[1]);                 in xe_guc_pagefault_handler()
   333  pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);   in xe_guc_pagefault_handler()
        [all …]

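The xe fault path uses the ASID twice: FIELD_GET(PFD_ASID, ...) pulls it out of the GuC fault descriptor, xa_load() maps it to the owning VM, and asid % NUM_PF_QUEUE spreads faults across the queues. A sketch of the extract-and-pick-queue step (the PFD field layout below is invented, not the real descriptor format):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_PF_QUEUE   8
    #define PFD_ASID_SHIFT 0
    #define PFD_ASID_MASK  (0xfffffu << PFD_ASID_SHIFT)   /* invented layout */

    struct pf_queue { int id; /* ... */ };
    static struct pf_queue pf_queues[NUM_PF_QUEUE];

    /* FIELD_GET equivalent, then a simple modulo pick: faults from the
     * same address space always land on the same queue, and no single
     * address space can monopolize all queues. */
    static struct pf_queue *queue_for_fault(uint32_t dw1)
    {
        uint32_t asid = (dw1 & PFD_ASID_MASK) >> PFD_ASID_SHIFT;

        return &pf_queues[asid % NUM_PF_QUEUE];
    }

    int main(void)
    {
        printf("%td\n", queue_for_fault(42) - pf_queues);   /* queue 2 */
        return 0;
    }
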
/linux/drivers/gpu/drm/xe/xe_trace_bo.h
    90  __field(u32, asid)
    99  __entry->asid = xe_vma_vm(vma)->usm.asid;
   106  __get_str(dev), __entry->vma, __entry->asid, __entry->start,
   187  __field(u32, asid)
   193  __entry->asid = vm->usm.asid;
   197  __entry->vm, __entry->asid)

/linux/arch/csky/include/asm/asid.h
    46  u64 asid, old_active_asid;                          in asid_check_context() [local]
    48  asid = atomic64_read(pasid);                        in asid_check_context()
    66  !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&   in asid_check_context()
    68  old_active_asid, asid))                             in asid_check_context()

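asid_check_context() is the lock-free fast path shared by the csky (and arm64) allocators: the mm's ASID can be reused without taking the slow-path lock iff it carries the live generation in its high bits and this CPU's active slot has not been zeroed by a concurrent rollover, the reuse being published with a single cmpxchg. A sketch using C11 atomics (sizes invented):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define ASID_BITS 8                     /* invented; info->bits in the kernel */

    static _Atomic uint64_t generation = 1ULL << ASID_BITS;
    static _Atomic uint64_t active_asid;    /* this CPU's per-CPU slot */

    /* Returns true when the fast path wins; otherwise the caller must
     * take the lock and run new_context(). The three conditions mirror
     * lines 66-68 above: slot not stolen, generation current, cmpxchg won. */
    static bool check_context_fast(uint64_t asid)
    {
        uint64_t old_active = atomic_load(&active_asid);

        return old_active &&
               !((asid ^ atomic_load(&generation)) >> ASID_BITS) &&
               atomic_compare_exchange_strong(&active_asid, &old_active, asid);
    }

    int main(void)
    {
        atomic_store(&active_asid, (1ULL << ASID_BITS) | 7);
        return check_context_fast((1ULL << ASID_BITS) | 9) ? 0 : 1;
    }

The xchg-to-zero in flush_context() and the zero check here are two halves of one protocol: a rollover in progress makes every fast path fail and funnel into the lock.
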
/linux/arch/csky/include/asm/mmu_context.h
    17  #define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
    19  #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
    32  setup_pgd(next->pgd, next->context.asid.counter);   in switch_mm()

/linux/arch/xtensa/mm/tlb.c
    71  mm->context.asid[cpu] = NO_CONTEXT;                 in local_flush_tlb_mm()
    75  mm->context.asid[cpu] = NO_CONTEXT;                 in local_flush_tlb_mm()
    96  if (mm->context.asid[cpu] == NO_CONTEXT)            in local_flush_tlb_range()
   100  (unsigned long)mm->context.asid[cpu], start, end);  in local_flush_tlb_range()
   106  set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));   in local_flush_tlb_range()
   134  if (mm->context.asid[cpu] == NO_CONTEXT)            in local_flush_tlb_page()
   140  set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));   in local_flush_tlb_page()

/linux/arch/loongarch/lib/dump_tlb.c
    30  unsigned long s_entryhi, entryhi, asid;             in dump_tlb() [local]
    52  asid = read_csr_asid();                             in dump_tlb()
    61  asid != s_asid)                                     in dump_tlb()
    73  vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);   in dump_tlb()

/linux/arch/mips/mm/context.c
    24  u64 asid;                                           in get_new_mmu_context() [local]
    34  asid = asid_cache(cpu);                             in get_new_mmu_context()
    36  if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {   in get_new_mmu_context()
    42  set_cpu_context(cpu, mm, asid);                     in get_new_mmu_context()
    43  asid_cache(cpu) = asid;                             in get_new_mmu_context()