/linux/arch/arm64/mm/context.c

   38  #define ctxid2asid(asid) ((asid) & ~ASID_MASK)
   39  #define asid2ctxid(asid, genid) ((asid) | (genid))
   44  u32 asid;                                    (in get_cpu_asid_bits())
   50  pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
   54  asid = 8;
   57  asid = 16;
   60  return asid;
   66  u32 asid = get_cpu_asid_bits();              (in verify_cpu_asid_bits())
   68  if (asid < asid_bits) {
   70  * We cannot decrease the ASID size at runtime, so panic if we support
  [all …]

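The two macros at lines 38 and 39 are the whole packing scheme: the ASID sits in the low bits of a 64-bit context id and the rollover generation in the bits above it. A minimal standalone sketch of that packing, assuming 16-bit ASIDs (the constants here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS 16
    #define ASID_MASK (~0ULL << ASID_BITS)          /* generation bits */

    #define ctxid2asid(ctxid)       ((ctxid) & ~ASID_MASK)
    #define asid2ctxid(asid, genid) ((asid) | (genid))

    int main(void)
    {
        uint64_t gen   = 3ULL << ASID_BITS;         /* generation 3 */
        uint64_t ctxid = asid2ctxid(0x42ULL, gen);  /* ASID 0x42 tagged with gen 3 */

        printf("ctxid=%#llx asid=%#llx\n",          /* ctxid=0x30042 asid=0x42 */
               (unsigned long long)ctxid,
               (unsigned long long)ctxid2asid(ctxid));
        return 0;
    }
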
/linux/arch/csky/mm/asid.c

    3  * Generic ASID allocator.
   14  #include <asm/asid.h>
   21  #define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
   27  u64 asid;                                    (in flush_context())
   29  /* Update the list of reserved ASIDs and the ASID bitmap. */
   33  asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
   38  * ASID, as this is the only trace we have of
   41  if (asid == 0)
   42  asid = reserved_asid(info, i);
   43  __set_bit(asid2idx(info, asid), info->map);
  [all …]

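The flush_context() lines quoted above are the rollover path of the generic allocator: each CPU's active ASID is captured (falling back to the reserved copy if the CPU already gave its ASID back) and marked in the bitmap so it cannot be handed out while possibly still live in a TLB. A simplified single-threaded sketch of that walk; the kernel uses per-CPU atomic64_xchg_relaxed, and all names here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS  4
    #define NR_ASIDS 256                        /* 1 << bits, illustrative */
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static uint64_t active_asid[NR_CPUS];       /* kernel: per-CPU atomic64_t */
    static uint64_t reserved_asid[NR_CPUS];
    static unsigned long asid_map[NR_ASIDS / BITS_PER_LONG];

    static void flush_context_sketch(void)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            /* kernel: asid = atomic64_xchg_relaxed(&active_asid(info, cpu), 0) */
            uint64_t asid = active_asid[cpu];
            active_asid[cpu] = 0;

            /*
             * Zero means this CPU already gave its ASID back; the reserved
             * copy is the only trace of what its TLB may still hold.
             */
            if (asid == 0)
                asid = reserved_asid[cpu];

            /* kernel: __set_bit(asid2idx(info, asid), info->map) */
            asid &= NR_ASIDS - 1;               /* asid2idx(): drop the generation */
            asid_map[asid / BITS_PER_LONG] |= 1UL << (asid % BITS_PER_LONG);
        }
    }

    int main(void)
    {
        active_asid[0] = 0x107;                 /* generation 1, ASID 7 */
        reserved_asid[1] = 0x10a;               /* CPU 1 already rolled over */
        flush_context_sketch();
        printf("map[0] = %#lx\n", asid_map[0]); /* 0x480: bits 7 and 10 set */
        return 0;
    }
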
/linux/arch/arm/mm/context.c

   27  * | process ID | ASID |
   32  * The ASID is used to tag entries in the CPU caches and TLBs.
   56  u64 context_id, asid;                        (in a15_erratum_get_cpumask())
   65  * running the same ASID as the one being invalidated.
   67  asid = per_cpu(active_asids, cpu).counter;
   68  if (asid == 0)
   69  asid = per_cpu(reserved_asids, cpu);
   70  if (context_id == asid)
   79  * With LPAE, the ASID and page tables are updated atomically, so there is
   80  * no need for a reserved set of tables (the active ASID tracking prevents
  [all …]

/linux/arch/arc/include/asm/mmu_context.h

   10  * -Major rewrite of Core ASID allocation routine get_new_mmu_context
   23  /* ARC ASID Management
   25  * MMU tags TLBs with an 8-bit ASID, avoiding the need to flush the TLB on
   28  * ASID is managed per cpu, so task threads across CPUs can have different
   29  * ASID. Global ASID management is needed if hardware supports TLB shootdown
   32  * Each task is assigned a unique ASID, with a simple round-robin allocator
   36  * A new allocation cycle, post rollover, could potentially reassign an ASID
   37  * to a different task. Thus the rule is to refresh the ASID in a new cycle.
   38  * The 32-bit @asid_cpu (and mm->asid) hold 8 bits of MMU PID and the remaining 24 bits
   49  #define asid_mm(mm, cpu) mm->context.asid[cpu]
  [all …]

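The comment block describes the allocator completely: the low 8 bits of the 32-bit counter are the hardware PID programmed into the MMU, the upper 24 bits count allocation cycles, and a PID wrap forces a TLB flush plus a cycle bump so stale ASIDs become detectable. A hedged sketch of that round-robin scheme (constants, the flush hook, and the reserved-PID-0 assumption are mine, not ARC's exact code):

    #include <stdint.h>
    #include <stdio.h>

    #define MMU_PID_BITS 8
    #define MMU_PID_MASK ((1u << MMU_PID_BITS) - 1)

    static uint32_t asid_cpu;       /* cycle count (24 bits) : MMU PID (8 bits) */

    static void local_flush_tlb_all_sketch(void) { /* stale entries wiped here */ }

    static uint32_t get_new_mmu_context_sketch(void)
    {
        /* Round-robin: take the next PID; a wrap to 0 starts a new cycle. */
        if (((++asid_cpu) & MMU_PID_MASK) == 0) {
            /*
             * All 256 PIDs of this cycle were handed out, so any TLB entry
             * may be stale: flush, then skip PID 0 (assumed reserved).
             */
            local_flush_tlb_all_sketch();
            asid_cpu++;
        }
        return asid_cpu;            /* task keeps all 32 bits; MMU gets low 8 */
    }

    int main(void)
    {
        asid_cpu = 0x0000fe;        /* near the end of cycle 0 */
        for (int i = 0; i < 4; i++) /* prints 0xff, then 0x101, 0x102, 0x103 */
            printf("allocated %#x\n", get_new_mmu_context_sketch());
        return 0;
    }
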
/linux/arch/xtensa/include/asm/mmu_context.h

   38  * NO_CONTEXT is the invalid ASID value that we don't ever assign to
   72  unsigned long asid = cpu_asid_cache(cpu);    (in get_new_mmu_context())
   73  if ((++asid & ASID_MASK) == 0) {
   75  * Start new asid cycle; continue counting with next
   79  asid += ASID_USER_FIRST;
   81  cpu_asid_cache(cpu) = asid;
   82  mm->context.asid[cpu] = asid;
   89  * Check if our ASID is of an older version and thus invalid.   (in get_mmu_context())
   93  unsigned long asid = mm->context.asid[cpu];
   95  if (asid == NO_CONTEXT ||
  [all …]

/linux/arch/sh/mm/tlbflush_32.c

   21  unsigned long asid;                          (in local_flush_tlb_page())
   24  asid = cpu_asid(cpu, vma->vm_mm);
   30  set_asid(asid);
   32  local_flush_tlb_one(asid, page);
   56  unsigned long asid;                          (in local_flush_tlb_range())
   59  asid = cpu_asid(cpu, mm);
   65  set_asid(asid);
   68  local_flush_tlb_one(asid, start);
   89  unsigned long asid;                          (in local_flush_tlb_kernel_range())
   92  asid = cpu_asid(cpu, &init_mm);
  [all …]

/linux/drivers/misc/sgi-gru/grumain.c

   52  /*--------- ASID Management -------------------------------------------
   58  * asid in use ("x"s below). Set "limit" to this value.
   66  * Each time MAX_ASID is reached, increment the asid generation. Since
   69  * a context, the asid generation of the GTS asid is rechecked. If it
   70  * doesn't match the current generation, a new asid will be assigned.
   75  * All asid manipulation & context loading/unloading is protected by the
   79  /* Hit the asid limit. Start over */
   89  static int gru_reset_asid_limit(struct gru_state *gru, int asid)
   93  gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
   96  if (asid >= limit)
  [all …]

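The scheme in the comment is generation-based: ASIDs are handed out sequentially up to the next value still in use (the "limit"), and when MAX_ASID is hit the allocator wraps and bumps a generation counter, so any GTS whose saved generation no longer matches gets a fresh ASID on next use. A compact sketch of that recheck-and-reassign logic; the real gru_reset_asid_limit() additionally scans loaded contexts to compute the limit, and these names are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_ASID 1
    #define MAX_ASID 0xffff                     /* illustrative bound */

    struct gru_sketch {
        uint32_t asid_next;                     /* next ASID to hand out */
        uint32_t asid_gen;                      /* bumped at every wrap  */
    };

    struct gts_sketch {                         /* saved per-context state */
        uint32_t asid;
        uint32_t asid_gen;                      /* generation when assigned */
    };

    static uint32_t gts_asid_sketch(struct gru_sketch *gru, struct gts_sketch *gts)
    {
        if (gts->asid == 0 || gts->asid_gen != gru->asid_gen) {
            /* Never assigned, or stale after a wrap: allocate afresh. */
            if (gru->asid_next >= MAX_ASID) {   /* hit the limit: start over */
                gru->asid_next = MIN_ASID;
                gru->asid_gen++;                /* invalidates every saved ASID */
            }
            gts->asid = gru->asid_next++;
            gts->asid_gen = gru->asid_gen;
        }
        return gts->asid;
    }

    int main(void)
    {
        struct gru_sketch gru = { .asid_next = MAX_ASID, .asid_gen = 3 };
        struct gts_sketch gts = { .asid = 7, .asid_gen = 2 }; /* stale gen */

        /* Wraps the allocator and reassigns: prints 1. */
        printf("reassigned asid %u\n", gts_asid_sketch(&gru, &gts));
        return 0;
    }
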
/linux/drivers/misc/sgi-gru/grutlbpurge.c

   89  * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
  122  * - asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
  132  * asid is non-zero.
  136  * - if the ctxtmap is zero, no context is active. Set the ASID to
  150  int grupagesize, pagesize, pageshift, gid, asid;   (in gru_flush_tlb_range())
  167  asid = asids->mt_asid;
  168  if (asids->mt_ctxbitmap && asid) {
  170  asid = GRUASID(asid, start);
  172  " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
  173  gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
  [all …]

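The branch at lines 167-168 encodes the two cases the comment spells out: if a context is resident (non-zero ctxbitmap and ASID) the range must actually be purged from that GRU's TLB; otherwise zeroing the ASID is enough, because the next context load is assigned a new ASID and the old translations die with the old one. A sketch of that decision (types and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct asid_state_sketch {
        uint32_t mt_asid;       /* 0: no ASID assigned on this GRU */
        uint16_t mt_ctxbitmap;  /* 0: no context currently loaded  */
    };

    static void tlb_purge_range(uint32_t asid, unsigned long start, long num)
    {
        printf("FLUSH asid %#x vaddr %#lx pages %ld\n", asid, start, num);
    }

    static void flush_one_gru_sketch(struct asid_state_sketch *asids,
                                     unsigned long start, long num)
    {
        if (asids->mt_ctxbitmap && asids->mt_asid) {
            /* Context resident: its TLB really holds entries for this ASID. */
            tlb_purge_range(asids->mt_asid, start, num);
        } else {
            /*
             * Not resident: drop the ASID instead. The next context load
             * gets a fresh ASID, implicitly discarding old translations.
             */
            asids->mt_asid = 0;
        }
    }

    int main(void)
    {
        struct asid_state_sketch loaded = { .mt_asid = 0x42, .mt_ctxbitmap = 1 };
        struct asid_state_sketch idle   = { .mt_asid = 0x43, .mt_ctxbitmap = 0 };

        flush_one_gru_sketch(&loaded, 0x4000, 16);   /* purges            */
        flush_one_gru_sketch(&idle, 0x4000, 16);     /* invalidates ASID  */
        return 0;
    }
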
/linux/drivers/misc/sgi-gru/gruhandles.c

  135  int asid, int pagesize, int global, int n,   (in tgh_invalidate())
  139  tgh->asid = asid;
  152  unsigned long vaddr, int asid, int dirty,    (in tfh_write_only())
  155  tfh->fillasid = asid;
  168  unsigned long vaddr, int asid, int dirty,    (in tfh_write_restart())
  171  tfh->fillasid = asid;

/linux/arch/csky/include/asm/asid.h

   22  /* Number of ASIDs allocated per context (shift value) */
   37  * Check that the ASID is still valid for the context. If not, generate a new ASID.
   39  * @pasid: Pointer to the current ASID batch
   46  u64 asid, old_active_asid;                   (in asid_check_context())
   48  asid = atomic64_read(pasid);
   52  * If our active_asid is non-zero and the ASID matches the current
   60  * - We get a valid ASID back from the cmpxchg, which means the
   66  !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
   68  old_active_asid, asid))

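The condition at lines 66-68 is the lockless fast path: if the mm's ASID carries the current generation and a cmpxchg can install it as this CPU's active ASID, no rollover can have raced in between, so the allocator lock is never taken. A standalone sketch using C11 atomics; the kernel version is per-CPU and relaxed-ordered, and it falls back to a locked slow path when this returns false:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS 16

    static _Atomic uint64_t generation = 1ULL << ASID_BITS; /* current gen: 1 */
    static _Atomic uint64_t active_asid;                    /* kernel: per-CPU */

    static bool asid_fast_path_sketch(_Atomic uint64_t *pasid)
    {
        uint64_t asid = atomic_load(pasid);
        uint64_t old_active = atomic_load(&active_asid);

        /*
         * Mirror of the quoted condition: succeed only if (a) active_asid
         * is non-zero (no rollover has zeroed it), (b) our ASID's
         * generation bits equal the current generation, and (c) the
         * cmpxchg wins, proving no rollover raced in between the checks.
         */
        return old_active &&
               !((asid ^ atomic_load(&generation)) >> ASID_BITS) &&
               atomic_compare_exchange_strong(&active_asid, &old_active, asid);
    }

    int main(void)
    {
        _Atomic uint64_t mm_asid = (1ULL << ASID_BITS) | 7; /* gen 1, ASID 7 */

        atomic_store(&active_asid, (1ULL << ASID_BITS) | 3);
        printf("fast path: %d\n", asid_fast_path_sketch(&mm_asid)); /* 1 */

        atomic_store(&generation, 2ULL << ASID_BITS);       /* rollover */
        printf("fast path: %d\n", asid_fast_path_sketch(&mm_asid)); /* 0 */
        return 0;
    }
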
/linux/arch/sh/include/asm/mmu_context_32.h

    6  static inline void set_asid(unsigned long asid)
    8  __raw_writel(asid, MMU_PTEAEX);
   16  static inline void set_asid(unsigned long asid)
   25  : "r" (asid), "m" (__m(MMU_PTEH)),
   31  unsigned long asid;                          (in get_asid())
   34  : "=r" (asid)
   36  asid &= MMU_CONTEXT_ASID_MASK;
   37  return asid;

/linux/arch/sh/include/asm/mmu_context.h

    6  * ASID handling idea taken from the MIPS implementation.
   22  * (b) ASID (Address Space IDentifier)
   33  /* Impossible ASID value, to differentiate from NO_CONTEXT. */
   57  unsigned long asid = asid_cache(cpu);        (in get_mmu_context())
   60  if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
   65  if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
   67  * We have exhausted the ASIDs of this version.
   76  if (!asid)
   77  asid = MMU_CONTEXT_FIRST_VERSION;
   80  cpu_context(cpu, mm) = asid_cache(cpu) = asid;
  [all …]

/linux/arch/x86/mm/tlb.c

   63  * to what is traditionally called ASID on the RISC processors.
   65  * We don't use the traditional ASID implementation, where each process/mm gets
   66  * its own ASID and flush/restart when we run out of ASID space.
   75  * ASID - [0, TLB_NR_DYN_ASIDS-1]
   80  * ASID+1, because PCID 0 is special.
   84  * PCID values, but we can still do with a single ASID denomination
  109  * Given @asid, compute kPCID
  111  static inline u16 kern_pcid(u16 asid)
  113  VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
  117  * Make sure that the dynamic ASID space does not conflict with the
  [all …]

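Because PCID 0 is special, the kernel PCID for dynamic ASID n is n + 1, and under PTI the matching user PCID additionally sets a high bit so kernel and user entries never share a tag. A sketch of that mapping; the +1 offset and the 0..TLB_NR_DYN_ASIDS-1 range come from the comments above, while bit 11 as the user-PCID marker is an assumption of this sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define TLB_NR_DYN_ASIDS  6     /* dynamic ASIDs: 0 .. TLB_NR_DYN_ASIDS-1 */
    #define PTI_USER_PCID_BIT 11    /* assumption: high bit marks user PCIDs  */

    /* kPCID: PCID used for the kernel page tables of dynamic ASID @asid. */
    static inline uint16_t kern_pcid(uint16_t asid)
    {
        return asid + 1;            /* +1 because PCID 0 is special */
    }

    /* uPCID: same ASID, tagged so user and kernel entries stay apart. */
    static inline uint16_t user_pcid(uint16_t asid)
    {
        return kern_pcid(asid) | (1u << PTI_USER_PCID_BIT);
    }

    int main(void)
    {
        for (uint16_t asid = 0; asid < TLB_NR_DYN_ASIDS; asid++)
            printf("asid %u -> kPCID %u, uPCID %#x\n",
                   (unsigned)asid, (unsigned)kern_pcid(asid),
                   (unsigned)user_pcid(asid));
        return 0;
    }
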
/linux/drivers/vhost/vdpa.c

   71  u64 last, u32 asid);
   80  static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
   82  struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
   86  if (as->id == asid)
   92  static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
   94  struct vhost_vdpa_as *as = asid_to_as(v, asid);
  102  static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
  104  struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
  107  if (asid_to_as(v, asid))
  110  if (asid >= v->vdpa->nas)
  [all …]

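asid_to_as() is a plain hash-bucket lookup: the ASID picks one of VHOST_VDPA_IOTLB_BUCKETS list heads and the chain is walked for an exact id match; asid_to_iotlb() merely unwraps the result. The same pattern with a singly linked chain and illustrative types:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IOTLB_BUCKETS 16                    /* power of two in practice */

    struct as_sketch {
        uint32_t id;                            /* the ASID */
        struct as_sketch *next;                 /* bucket chain */
        /* ... iotlb and other per-address-space state ... */
    };

    struct vdpa_sketch {
        struct as_sketch *as[IOTLB_BUCKETS];
    };

    static struct as_sketch *asid_to_as_sketch(struct vdpa_sketch *v, uint32_t asid)
    {
        /* Hash by ASID, then walk the (short) chain for an exact match. */
        for (struct as_sketch *as = v->as[asid % IOTLB_BUCKETS]; as; as = as->next)
            if (as->id == asid)
                return as;
        return NULL;                            /* no such address space */
    }

    int main(void)
    {
        struct as_sketch as0 = { .id = 0 }, as17 = { .id = 17 };
        struct vdpa_sketch v = { 0 };

        v.as[0 % IOTLB_BUCKETS]  = &as0;        /* bucket 0 */
        v.as[17 % IOTLB_BUCKETS] = &as17;       /* bucket 1 */

        printf("%p %p\n", (void *)asid_to_as_sketch(&v, 17),
                          (void *)asid_to_as_sketch(&v, 33)); /* hit, miss */
        return 0;
    }
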
/linux/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

   55  u16 asid)                                    (in arm_smmu_make_sva_cd())
   77  FIELD_PREP(CTXDESC_CD_0_ASID, asid));
  151  arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);   (in arm_smmu_mm_arch_invalidate_secondary_tlbs())
  153  arm_smmu_tlb_inv_range_asid(start, size, smmu_domain->cd.asid,
  181  smmu_domain->cd.asid);                       (in arm_smmu_mm_release())
  187  arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
  350  arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid);   (in arm_smmu_sva_set_dev_pasid())
  362  * Ensure the ASID is empty in the iommu cache before allowing reuse.   (in arm_smmu_sva_domain_free())
  364  arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
  368  * still be called/running at this point. We allow the ASID to be
  [all …]

/linux/drivers/accel/habanalabs/common/asid.c

   20  /* ASID 0 is reserved for the kernel driver and device CPU */   (in hl_asid_init())
   50  void hl_asid_free(struct hl_device *hdev, unsigned long asid)
   52  if (asid == HL_KERNEL_ASID_ID || asid >= hdev->asic_prop.max_asid) {
   53  dev_crit(hdev->dev, "Invalid ASID %lu", asid);
   57  clear_bit(asid, hdev->asid_bitmap);

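The driver keeps ASIDs in a bitmap with ASID 0 permanently reserved for the kernel driver and device CPU, so allocation is a first-zero-bit scan starting at 1 and free is a clear_bit behind a range check. A userspace sketch of that allocator; the 64-ASID bound is illustrative, and 0 doubles as the allocation-failure value exactly because it is never handed out:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ASID       64                   /* illustrative */
    #define KERNEL_ASID_ID 0                    /* reserved, never handed out */

    static uint64_t asid_bitmap = 1;            /* bit 0 pre-set: ASID 0 taken */

    static long asid_alloc_sketch(void)
    {
        for (unsigned long asid = 1; asid < MAX_ASID; asid++)
            if (!(asid_bitmap & (1ULL << asid))) {
                asid_bitmap |= 1ULL << asid;    /* kernel: find_first_zero_bit + set_bit */
                return asid;
            }
        return 0;                               /* no free ASID */
    }

    static void asid_free_sketch(unsigned long asid)
    {
        if (asid == KERNEL_ASID_ID || asid >= MAX_ASID) {
            fprintf(stderr, "Invalid ASID %lu\n", asid);
            return;
        }
        asid_bitmap &= ~(1ULL << asid);
    }

    int main(void)
    {
        long a = asid_alloc_sketch();           /* 1: first free after 0 */
        long b = asid_alloc_sketch();           /* 2 */
        asid_free_sketch(a);
        asid_free_sketch(0);                    /* rejected: kernel ASID */
        printf("a=%ld b=%ld bitmap=%#lx\n", a, b, (unsigned long)asid_bitmap);
        return 0;
    }
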
/linux/drivers/accel/habanalabs/common/context.c

  104  if (ctx->asid != HL_KERNEL_ASID_ID) {        (in hl_ctx_fini())
  105  dev_dbg(hdev->dev, "closing user context, asid=%u\n", ctx->asid);
  120  hl_asid_free(hdev, ctx->asid);
  229  ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */   (in hl_ctx_init())
  243  ctx->asid = hl_asid_alloc(hdev);
  244  if (!ctx->asid) {
  245  dev_err(hdev->dev, "No free ASID, failed to create context\n");
  274  dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
  275  get_task_comm(task_comm, current), ctx->asid);
  285  if (ctx->asid != HL_KERNEL_ASID_ID)
  [all …]

/linux/arch/arm64/include/asm/tlbflush.h

   58  #define __TLBI_VADDR(addr, asid) \
   62  __ta |= (unsigned long)(asid) << 48; \
  132  * | ASID | TG | SCALE | NUM | TTL | BADDR |
  152  #define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl) \
  161  __ta |= FIELD_PREP(TLBIR_ASID_MASK, asid); \
  208  * The 'mm' argument identifies the ASID to invalidate.
  275  unsigned long asid;                          (in flush_tlb_mm())
  278  asid = __TLBI_VADDR(0, ASID(mm));
  279  __tlbi(aside1is, asid);
  280  __tlbi_user(aside1is, asid);
  [all …]

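__TLBI_VADDR builds the 64-bit operand of an address-based TLBI: the virtual page number in the low bits and the ASID in bits [63:48], matching line 62 above. A host-side sketch of the packing; the 44-bit width of the address field is an assumption of this sketch, not quoted above:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t tlbi_vaddr_sketch(uint64_t addr, uint16_t asid)
    {
        uint64_t ta = (addr >> 12) & ((1ULL << 44) - 1); /* page number, bits [43:0] */
        ta |= (uint64_t)asid << 48;                      /* ASID tag, bits [63:48]   */
        return ta;                                       /* operand for, e.g., vae1is */
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)tlbi_vaddr_sketch(0xffff000012345000ULL, 0x42));
        return 0;
    }
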
/linux/arch/riscv/kvm/tlb.c

   78  unsigned long asid,                          (in kvm_riscv_local_hfence_vvma_asid_gva())
   86  kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
   96  : : "r" (pos), "r" (asid) : "memory");
  101  : : "r" (pos), "r" (asid) : "memory");
  108  unsigned long asid)                          (in kvm_riscv_local_hfence_vvma_asid_all())
  114  asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");
  269  READ_ONCE(v->vmid), d.asid,                  (in kvm_riscv_hfence_process())
  275  READ_ONCE(v->vmid), d.asid);
  340  data.asid = 0;                               (in kvm_riscv_hfence_gvma_vmid_gpa())
  358  unsigned long order, unsigned long asid)     (in kvm_riscv_hfence_vvma_asid_gva())
  [all …]

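kvm_riscv_local_hfence_vvma_asid_gva() walks the guest-VA range in BIT(order)-sized steps, issuing one HFENCE.VVMA per step, and degrades to the whole-ASID fence when the range is too large to be worth walking. A sketch of that loop with the instruction stubbed out; the fallback threshold here is illustrative, not the kernel's:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    static void hfence_vvma(unsigned long gva, unsigned long asid)
    {
        /* stands in for: asm volatile(HFENCE_VVMA(%0, %1) :: "r"(gva), "r"(asid)) */
        printf("hfence.vvma gva=%#lx asid=%lu\n", gva, asid);
    }

    static void hfence_vvma_asid_gva_sketch(unsigned long asid, unsigned long gva,
                                            unsigned long gvsz, unsigned long order)
    {
        /* Too many iterations to be worth it: fence the whole ASID instead. */
        if (gvsz > (BIT(order) << 8)) {         /* threshold is illustrative */
            hfence_vvma(0, asid);               /* kernel: ..._vvma_asid_all() */
            return;
        }
        for (unsigned long pos = gva; pos < gva + gvsz; pos += BIT(order))
            hfence_vvma(pos, asid);
    }

    int main(void)
    {
        /* Three 4 KiB pages at 0x80000000 for ASID 5: three fences. */
        hfence_vvma_asid_gva_sketch(5, 0x80000000UL, 3 * 4096, 12);
        return 0;
    }
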
/linux/drivers/iommu/arm/arm-smmu/qcom_iommu.c

   54  struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */
   62  u8 asid; /* asid and ctx bank # are 1:1 */
   82  static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
   87  return qcom_iommu->ctxs[asid];
  141  iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);   (in qcom_iommu_tlb_inv_context())
  161  iova |= ctx->asid;                           (in qcom_iommu_tlb_inv_range_nosync())
  207  fsr, iova, fsynr, ctx->asid);                (in qcom_iommu_fault())
  258  ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);   (in qcom_iommu_init_domain())
  282  FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
  554  unsigned asid = args->args[0];               (in qcom_iommu_of_xlate())
  [all …]

/linux/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json

   75  "PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event",
   78  "BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event"
   81  "PublicDescription": "Received SFENCE.VMA with ASID request from other HART event",
   84  "BriefDescription": "Received SFENCE.VMA with ASID request from other HART event"
  123  "PublicDescription": "Sent HFENCE.VVMA with ASID request to other HART event",
  126  "BriefDescription": "Sent HFENCE.VVMA with ASID request to other HART event"
  129  "PublicDescription": "Received HFENCE.VVMA with ASID request from other HART event",
  132  "BriefDescription": "Received HFENCE.VVMA with ASID request from other HART event"

/linux/drivers/vdpa/vdpa_sim/vdpa_sim.c

  603  unsigned int asid)                           (in vdpasim_set_group_asid())
  612  if (asid >= vdpasim->dev_attr.nas)
  615  iommu = &vdpasim->iommu[asid];
  629  static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
  638  if (asid >= vdpasim->dev_attr.nas)           (in vdpasim_set_map())
  643  iommu = &vdpasim->iommu[asid];
  645  vdpasim->iommu_pt[asid] = false;
  663  static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid)
  667  if (asid >= vdpasim->dev_attr.nas)           (in vdpasim_reset_map())
  671  if (vdpasim->iommu_pt[asid])
  [all …]

/linux/drivers/gpu/drm/xe/xe_trace_bo.h

   85  __field(u32, asid)
   94  __entry->asid = xe_vma_vm(vma)->usm.asid;
  100  TP_printk("dev=%s, vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
  101  __get_str(dev), __entry->vma, __entry->asid, __entry->start,
  182  __field(u32, asid)
  188  __entry->asid = vm->usm.asid;
  191  TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
  192  __entry->vm, __entry->asid)

/linux/drivers/accel/habanalabs/gaudi/gaudi.c

  479  static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
  492  static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
 1674  /* We only support a single ASID for the user, so for the sake of optimization, just   (in gaudi_late_init())
 1675  * initialize the ASID one time during device initialization with the fixed value of 1
 3657  "failed to set hop0 addr for asid %d\n", i);   (in gaudi_mmu_init())
 5847  static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
 5967  * using the compute ctx ASID, if one exists. If not, use the kernel ctx   (in gaudi_debugfs_read_dma())
 5968  * ASID
 6006  * using the compute ctx ASID, if one exists. If not, use the kernel ctx
 6007  * ASID
  [all …]

/linux/arch/xtensa/mm/tlb.c

   58  /* If mm is current, we simply assign the current task a new ASID, thus,
   71  mm->context.asid[cpu] = NO_CONTEXT;          (in local_flush_tlb_mm())
   75  mm->context.asid[cpu] = NO_CONTEXT;
   96  if (mm->context.asid[cpu] == NO_CONTEXT)     (in local_flush_tlb_range())
  100  (unsigned long)mm->context.asid[cpu], start, end);
  106  set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
  134  if (mm->context.asid[cpu] == NO_CONTEXT)     (in local_flush_tlb_page())
  140  set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
  223  * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
  224  * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.