/linux/drivers/iommu/arm/arm-smmu/qcom_iommu.c

     1  // SPDX-License-Identifier: GPL-2.0-only
     3   * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
    13  #include <linux/dma-mapping.h>
    17  #include <linux/io-64-nonatomic-hi-lo.h>
    18  #include <linux/io-pgtable.h>
    33  #include "arm-smmu.h"
    54          struct qcom_iommu_ctx   *ctxs[];        /* indexed by asid */
    62          u8                       asid;          /* asid and ctx bank # are 1:1 */
    82  static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
    84          struct qcom_iommu_dev *qcom_iommu = d->iommu;
  [all …]
/linux/arch/mips/mm/context.c

     1  // SPDX-License-Identifier: GPL-2.0
    24          u64 asid;
    34          asid = asid_cache(cpu);
    36          if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
    39                  local_flush_tlb_all();  /* start new asid cycle */
    42          set_cpu_context(cpu, mm, asid);
    43          asid_cache(cpu) = asid;
    58          /* Check if our ASID is of an older version and thus invalid */
    94           * context-switch
   146           * We had a valid MMID in a previous life, so try to re-use
  [all …]
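The context.c excerpt above increments a per-CPU ASID counter and starts a new generation (with a full TLB flush) when the hardware ASID bits wrap. Below is a minimal userspace sketch of that versioned-ASID scheme, assuming an 8-bit ASID field and a stubbed-out flush; the constants and helper names are illustrative, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS           8
    #define ASID_MASK           ((1ULL << ASID_BITS) - 1)   /* low bits: hardware ASID     */
    #define ASID_FIRST_VERSION  (1ULL << ASID_BITS)         /* high bits: generation count */

    static uint64_t asid_cache = ASID_FIRST_VERSION;        /* per-CPU in the real kernel  */

    static void local_flush_tlb_all(void)
    {
            puts("  (TLB flushed: all old-generation ASIDs are now stale)");
    }

    /* Hand out the next ASID; when the hardware bits wrap, bump the generation. */
    static uint64_t get_new_mmu_context(void)
    {
            uint64_t asid = asid_cache;

            if (!((asid += 1) & ASID_MASK))         /* hardware bits wrapped to zero */
                    local_flush_tlb_all();          /* start a new asid cycle        */

            asid_cache = asid;
            return asid;
    }

    /* A cached context is stale if its generation bits no longer match. */
    static int context_is_current(uint64_t mm_ctx)
    {
            return !((mm_ctx ^ asid_cache) & ~ASID_MASK);
    }

    int main(void)
    {
            uint64_t ctx = get_new_mmu_context();

            for (int i = 0; i < 256; i++)           /* force one wrap */
                    get_new_mmu_context();

            printf("ctx %#llx still current? %s\n", (unsigned long long)ctx,
                   context_is_current(ctx) ? "yes" : "no");
            return 0;
    }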
/linux/drivers/accel/habanalabs/common/mmu/mmu.c

     1  // SPDX-License-Identifier: GPL-2.0
     4   * Copyright 2016-2022 HabanaLabs, Ltd.
    16   * hl_mmu_get_funcs() - get MMU functions structure
    26          return &hdev->mmu_func[pgt_residency];
    31          struct asic_fixed_properties *prop = &hdev->asic_prop;
    33          return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
    34                                          prop->dmmu.start_addr,
    35                                          prop->dmmu.end_addr);
    39   * hl_mmu_init() - initialize the MMU module.
    42   * Return: 0 for success, non-zero for failure.
  [all …]
/linux/drivers/accel/habanalabs/common/mmu/mmu_v1.c

     1  // SPDX-License-Identifier: GPL-2.0
     4   * Copyright 2016-2019 HabanaLabs, Ltd.
    15  static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
    20          mask = mmu_prop->hop_masks[hop_idx];
    21          shift = mmu_prop->hop_shifts[hop_idx];
    23                  ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
    26  static int dram_default_mapping_init(struct hl_ctx *ctx)
    28          struct hl_device *hdev = ctx->hdev;
    29          struct asic_fixed_properties *prop = &hdev->asic_prop;
    34          if ((!prop->dram_supports_virtual_memory) ||
  [all …]
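get_hop_pte_addr() above computes, for one level ("hop") of the page-table walk, the address of the PTE covering a virtual address: the hop's slice of the VA is extracted with a per-hop mask and shift and used as an index scaled by the PTE size. A small standalone sketch of that calculation; the masks, shifts and PTE size are made-up example values (a 3-hop, 9-bits-per-hop layout), not Habana's.

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_SIZE        8ULL            /* bytes per page-table entry (example value) */

    static const uint64_t hop_masks[]  = { 0x7FC0000000ULL, 0x003FE00000ULL, 0x00001FF000ULL };
    static const uint64_t hop_shifts[] = { 30, 21, 12 };

    /* Address of the PTE for 'virt_addr' inside the hop table that starts at 'hop_addr'. */
    static uint64_t hop_pte_addr(unsigned int hop_idx, uint64_t hop_addr, uint64_t virt_addr)
    {
            uint64_t idx = (virt_addr & hop_masks[hop_idx]) >> hop_shifts[hop_idx];

            return hop_addr + PTE_SIZE * idx;
    }

    int main(void)
    {
            uint64_t va = 0x12345678000ULL;

            /* hop_addr is passed as 0, so the result is just the byte offset in each table */
            for (unsigned int hop = 0; hop < 3; hop++)
                    printf("hop %u: PTE at table base + 0x%llx\n",
                           hop, (unsigned long long)hop_pte_addr(hop, 0, va));
            return 0;
    }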
/linux/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c

     1  // SPDX-License-Identifier: GPL-2.0
     4   * Copyright 2020-2022 HabanaLabs, Ltd.
    13  static struct pgt_info *hl_mmu_v2_hr_get_pgt_info(struct hl_ctx *ctx, u64 phys_hop_addr)
    17          hash_for_each_possible(ctx->hr_mmu_phys_hash, pgt_info, node,
    19                  if (phys_hop_addr == pgt_info->phys_addr)
    25  static void hl_mmu_v2_hr_add_pgt_info(struct hl_ctx *ctx, struct pgt_info *pgt_info,
    28          hash_add(ctx->hr_mmu_phys_hash, &pgt_info->node, phys_addr);
    31  static struct pgt_info *hl_mmu_v2_hr_get_hop0_pgt_info(struct hl_ctx *ctx)
    33          return &ctx->hdev->mmu_priv.hr.mmu_asid_hop0[ctx->asid];
    37   * hl_mmu_v2_hr_init() - initialize the MMU module.
  [all …]
/linux/drivers/accel/habanalabs/common/mmu/mmu_v2.c

     1  // SPDX-License-Identifier: GPL-2.0
     4   * Copyright 2016-2020 HabanaLabs, Ltd.
    15   * hl_mmu_v2_ctx_init() - initialize a context for using the MMU module.
    16   * @ctx: pointer to the context structure to initialize.
    20   * Return: 0 on success, non-zero otherwise.
    22  static int hl_mmu_v2_ctx_init(struct hl_ctx *ctx)
    24          hash_init(ctx->mmu_shadow_hash);
    30   * hl_mmu_v2_ctx_fini - disable a ctx from using the mmu module
    32   * @ctx: pointer to the context structure
    35   * - Free any pgts which were not freed yet
  [all …]
/linux/arch/mips/include/asm/mmu_context.h

    26  #include <asm-generic/mm_hooks.h>
    80   * allow the kernel to create wired entries with the MMID of current->active_mm
    92   * as a software asid extension.
    98          return ~(u64)(asid_mask | (asid_mask - 1));
   109                  return atomic64_read(&mm->context.mmid);
   111          return mm->context.asid[cpu];
   115                                     struct mm_struct *mm, u64 ctx)
   118                  atomic64_set(&mm->context.mmid, ctx);
   120          mm->context.asid[cpu] = ctx;
   148          mm->context.bd_emupage_allocmap = NULL;
  [all …]
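asid_version_mask() above relies on a small bit trick: OR-ing asid_mask with (asid_mask - 1) fills every bit up to the highest ASID bit, so the complement selects only the bits above the hardware ASID field, which is where the software generation ("version") counter lives. A tiny worked example, assuming an 8-bit ASID field; the width is illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t asid_mask = 0xff;      /* assumed 8-bit hardware ASID field */

            /* 0xff | 0xfe = 0xff, so the complement keeps only the bits above
             * the field; for a mask with trailing zero bits (e.g. 0x3f0) the
             * OR with (mask - 1) first fills the low bits as well. */
            uint64_t version_mask = ~(uint64_t)(asid_mask | (asid_mask - 1));

            printf("version mask: 0x%016llx\n", (unsigned long long)version_mask);
            /* prints 0xffffffffffffff00 */
            return 0;
    }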
/linux/arch/m68k/include/asm/mmu_context.h

     1  /* SPDX-License-Identifier: GPL-2.0 */
     5  #include <asm-generic/mm_hooks.h>
    30          mm_context_t ctx;
    32          if (mm->context != NO_CONTEXT)
    38          ctx = next_mmu_context;
    39          while (test_and_set_bit(ctx, context_map)) {
    40                  ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
    41                  if (ctx > LAST_CONTEXT)
    42                          ctx = 0;
    44          next_mmu_context = (ctx + 1) & LAST_CONTEXT;
  [all …]
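The get_mmu_context() excerpt above hands out contexts round-robin from a bitmap, skipping slots that are still in use. A compact userspace sketch of the same idea follows; LAST_CONTEXT is shrunk to a toy value, the bit helpers are simplified stand-ins for the kernel's, and the path that reclaims a context when the map is completely full (also elided in the excerpt) is not modelled, so this toy never frees what it allocates.

    #include <stdint.h>
    #include <stdio.h>

    #define LAST_CONTEXT    7               /* tiny map so the round-robin is easy to see */

    static uint32_t context_map;            /* bit n set => context n is in use */
    static unsigned int next_mmu_context;

    static int test_and_set_bit(unsigned int nr, uint32_t *map)
    {
            int was_set = (*map >> nr) & 1;

            *map |= 1u << nr;
            return was_set;
    }

    static unsigned int find_next_zero_bit(uint32_t map, unsigned int size, unsigned int start)
    {
            for (unsigned int i = start; i < size; i++)
                    if (!((map >> i) & 1))
                            return i;
            return size;                    /* nothing free at or after 'start' */
    }

    static unsigned int get_mmu_context(void)
    {
            unsigned int ctx = next_mmu_context;

            while (test_and_set_bit(ctx, &context_map)) {
                    ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1, ctx);
                    if (ctx > LAST_CONTEXT)
                            ctx = 0;
            }
            next_mmu_context = (ctx + 1) & LAST_CONTEXT;
            return ctx;
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    printf("allocated context %u\n", get_mmu_context());
            return 0;
    }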
/linux/drivers/gpu/drm/xe/xe_pagefault.c

     1  // SPDX-License-Identifier: MIT
    32   * a multi-threaded worker to process them. Multiple producers are supported,
    62          return need_vram_move ? xe_bo_migrate(bo, vram->placement, NULL, exec) :
    71          struct xe_validation_ctx ctx;
    76          lockdep_assert_held_write(&vm->lock);
    78          needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
    80                  return needs_vram < 0 ? needs_vram : -EACCES;
    89          if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
    90                                          vma->tile_invalidated) && !atomic)
   103          /* Lock VM and BOs dma-resv */
  [all …]
/linux/drivers/gpu/drm/xe/xe_svm.c

     1  // SPDX-License-Identifier: MIT
    31          .__flags = READ_ONCE(range->base.pages.flags.__flags),
    40          return xe_svm_range_in_vram(range) && range->tile_present;
    50          return gpusvm_to_vm(r->gpusvm);
    54          vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
    55                 "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
    57                 (operation__), range_to_vm(&(r__)->base)->usm.asid, \
    58                 (r__)->base.gpusvm, \
    61                 (r__)->base.pages.notifier_seq, \
    79          INIT_LIST_HEAD(&range->garbage_collector_link);
  [all …]
/linux/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml

     1  # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
     3  ---
     5  $schema: http://devicetree.org/meta-schemas/core.yaml#
    10    - Konrad Dybcio <konradybcio@kernel.org>
    13    Qualcomm "B" family devices which are not compatible with arm-smmu have
    16    to non-secure vs secure interrupt line.
    21    - items:
    22        - enum:
    23            - qcom,msm8916-iommu
    24            - qcom,msm8917-iommu
  [all …]
/linux/drivers/accel/habanalabs/common/command_buffer.c

     1  // SPDX-License-Identifier: GPL-2.0
     4   * Copyright 2016-2019 HabanaLabs, Ltd.
    17  static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
    19          struct hl_device *hdev = ctx->hdev;
    20          struct asic_fixed_properties *prop = &hdev->asic_prop;
    21          u32 page_size = prop->pmmu.page_size;
    24          if (!hdev->supports_cb_mapping) {
    25                  dev_err_ratelimited(hdev->dev,
    27                  return -EINVAL;
    30          if (cb->is_mmu_mapped)
  [all …]
/linux/drivers/misc/sgi-gru/grumain.c

     1  // SPDX-License-Identifier: GPL-2.0-or-later
    52  /*--------- ASID Management -------------------------------------------
    58   * asid in use ("x"s below). Set "limit" to this value.
    66   * Each time MAX_ASID is reached, increment the asid generation. Since
    67   * the search for in-use asids only checks contexts with GRUs currently
    69   * a context, the asid generation of the GTS asid is rechecked. If it
    70   * doesn't match the current generation, a new asid will be assigned.
    72   *      0---------------x------------x---------------------x----|
    73   *        ^-next        ^-limit      ^-MAX_ASID
    75   * All asid manipulation & context loading/unloading is protected by the
  [all …]
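The grumain.c comment above describes GRU ASID management: asids are handed out from a moving cursor, the generation counter is bumped each time MAX_ASID is reached, and a context holding an asid from an older generation must be given a fresh one. Below is a toy model of just the generation/wrap behaviour; the "limit" recomputation and the scan of in-use asids are left out, and the constants are made up so the wrap is easy to trigger.

    #include <stdio.h>

    #define MIN_ASID        1
    #define MAX_ASID        16              /* tiny range so a wrap is visible */

    static int next_asid = MIN_ASID;
    static int asid_generation = 1;

    static int gru_assign_asid(void)
    {
            int asid = next_asid++;

            if (next_asid > MAX_ASID) {     /* wrapped: start a new generation */
                    asid_generation++;
                    next_asid = MIN_ASID;
            }
            return asid;
    }

    /* A previously assigned asid may be reused only if its generation is current. */
    static int asid_still_valid(int asid, int generation)
    {
            (void)asid;
            return generation == asid_generation;
    }

    int main(void)
    {
            int saved_asid = gru_assign_asid();
            int saved_gen  = asid_generation;

            for (int i = 0; i < MAX_ASID; i++)      /* force a wrap */
                    gru_assign_asid();

            printf("saved asid %d valid after wrap? %s\n", saved_asid,
                   asid_still_valid(saved_asid, saved_gen) ? "yes" : "no");
            return 0;
    }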
/linux/drivers/misc/sgi-gru/grutables.h

     1  /* SPDX-License-Identifier: GPL-2.0-or-later */
    28-55   (matched lines fall inside the ASCII block diagram in the header
             comment; only the box outlines were captured, so the diagram is
             not reproduced here)
  [all …]
/linux/drivers/misc/sgi-gru/gruprocfs.c

     1  // SPDX-License-Identifier: GPL-2.0-or-later
   122          seq_puts(s, "#id count aver-clks max-clks\n");
   127                  seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
   168          seq_puts(file, "# gid bid ctx# asid pid cbrs dsbytes mode\n");
   171                  ts = gru->gs_gts[i];
   175                             gru->gs_gid, gru->gs_blade_id, i,
   176                             is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
   177                             is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
   178                             ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
   179                             ts->ts_cbr_au_count * GRU_DSR_AU_BYTES,
  [all …]
/linux/arch/arm64/boot/dts/qcom/msm8976.dtsi

     1  // SPDX-License-Identifier: BSD-3-Clause
     3   * Copyright (c) 2016-2022, AngeloGioacchino Del Regno
     9  #include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
    10  #include <dt-bindings/clock/qcom,gcc-msm8976.h>
    11  #include <dt-bindings/clock/qcom,rpmcc.h>
    12  #include <dt-bindings/gpio/gpio.h>
    13  #include <dt-bindings/interrupt-controller/arm-gic.h>
    14  #include <dt-bindings/interrupt-controller/irq.h>
    15  #include <dt-bindings/power/qcom-rpmpd.h>
    18          interrupt-parent = <&intc>;
  [all …]
/linux/arch/x86/include/asm/sev.h

     1  /* SPDX-License-Identifier: GPL-2.0 */
    12  #include <linux/sev-guest.h>
    15  #include <asm/sev-common.h>
    74          u64 mask = (1ULL << bits) - 1;
   105   * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
   141          u32 asid;
   208          u8 payload[PAGE_SIZE - sizeof(struct snp_guest_msg_hdr)];
   230   *      GUEST_TSC_FREQ * (1 - (TSC_FACTOR * 0.00001))
   234   *      GUEST_TSC_FREQ -= (GUEST_TSC_FREQ * TSC_FACTOR) / 100000;
   236  #define SNP_SCALE_TSC_FREQ(freq, factor) ((freq) - (freq) * (factor) / 100000)
  [all …]
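The SNP_SCALE_TSC_FREQ() macro above implements the reduction shown in the comment on lines 230/234: the factor is in units of 0.00001 (0.001%), so the scaled value is freq - freq * factor / 100000 in integer arithmetic. A short worked example with made-up numbers, a 2 GHz guest TSC frequency and a factor of 2500 (2.5%):

    #include <stdint.h>
    #include <stdio.h>

    #define SNP_SCALE_TSC_FREQ(freq, factor) ((freq) - (freq) * (factor) / 100000)

    int main(void)
    {
            uint64_t guest_tsc_freq = 2000000000ULL;        /* 2 GHz (example) */
            uint64_t tsc_factor = 2500;                     /* 2500 * 0.001% = 2.5% */

            /* 2e9 - (2e9 * 2500) / 100000 = 2e9 - 5e7 = 1,950,000,000 */
            printf("scaled TSC frequency: %llu Hz\n",
                   (unsigned long long)SNP_SCALE_TSC_FREQ(guest_tsc_freq, tsc_factor));
            return 0;
    }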
/linux/drivers/vhost/vhost.c

     1  // SPDX-License-Identifier: GPL-2.0-only
    57  #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
    58  #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
    63          vq->user_be = !virtio_legacy_is_little_endian();
    68          vq->user_be = true;
    73          vq->user_be = false;
    80          if (vq->private_data)
    81                  return -EBUSY;
    84                  return -EFAULT;
    88                  return -EINVAL;
  [all …]
/linux/drivers/vhost/vhost.h

     1  /* SPDX-License-Identifier: GPL-2.0 */
    89          struct eventfd_ctx *ctx;
   163          /* Ring endianness requested by userspace for cross-endian support. */
   205          int (*msg_handler)(struct vhost_dev *dev, u32 asid,
   213          int (*msg_handler)(struct vhost_dev *dev, u32 asid,
   287                  if ((vq)->error_ctx)                            \
   288                          eventfd_signal((vq)->error_ctx);        \
   326   * vhost_vq_set_backend - Set backend.
   331   * Context: Need to call with vq->mutex acquired.
   336          vq->private_data = private_data;
  [all …]
/linux/drivers/vdpa/mlx5/net/mlx5_vnet.c

     1  // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
    47  #define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
   149          if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
   150                  if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
   156          return idx <= mvdev->max_idx;
   179          /* TODO: cross-endian support */
   183                 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
   198          if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
   201          return mvdev->max_vqs;
   283          struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
  [all …]
/linux/drivers/accel/habanalabs/goya/goyaP.h

     1  /* SPDX-License-Identifier: GPL-2.0
     3   * Copyright 2016-2022 HabanaLabs, Ltd.
   126  #define VA_HOST_SPACE_END       0x3FF8000000000ull      /* 1PB - 1TB */
   127  #define VA_HOST_SPACE_SIZE      (VA_HOST_SPACE_END - \
   132  #define VA_DDR_SPACE_SIZE       (VA_DDR_SPACE_END - \
   195  int goya_context_switch(struct hl_device *hdev, u32 asid);
   223  int goya_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
   224  void goya_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
/linux/drivers/accel/habanalabs/goya/goya.c

     1  // SPDX-License-Identifier: GPL-2.0
     4   * Copyright 2016-2022 HabanaLabs, Ltd.
    23   *      - Range registers (When MMU is enabled, DMA RR does NOT protect host)
    24   *      - MMU
    27   *      - Range registers (protect the first 512MB)
    28   *      - MMU (isolation between users)
    31   *      - Range registers
    32   *      - Protection bits
    44   *      - checks DMA pointer
    45   *      - WREG, MSG_PROT are not allowed.
  [all …]
/linux/drivers/accel/habanalabs/gaudi/gaudiP.h

     1  /* SPDX-License-Identifier: GPL-2.0
     3   * Copyright 2019-2022 HabanaLabs, Ltd.
    52  #define GAUDI_HBM_CFG_BASE              (mmHBM0_BASE - CFG_BASE)
    53  #define GAUDI_HBM_CFG_OFFSET            (mmHBM1_BASE - mmHBM0_BASE)
    79  #define DMA_QMAN_OFFSET                 (mmDMA1_QM_BASE - mmDMA0_QM_BASE)
    80  #define TPC_QMAN_OFFSET                 (mmTPC1_QM_BASE - mmTPC0_QM_BASE)
    81  #define MME_QMAN_OFFSET                 (mmMME1_QM_BASE - mmMME0_QM_BASE)
    82  #define NIC_MACRO_QMAN_OFFSET           (mmNIC1_QM0_BASE - mmNIC0_QM0_BASE)
    83  #define NIC_ENGINE_QMAN_OFFSET          (mmNIC0_QM1_BASE - mmNIC0_QM0_BASE)
    85  #define TPC_CFG_OFFSET                  (mmTPC1_CFG_BASE - mmTPC0_CFG_BASE)
  [all …]
/linux/drivers/iommu/msm_iommu.c

     1  // SPDX-License-Identifier: GPL-2.0-only
     2  /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
    13  #include <linux/io-pgtable.h>
    25  #include "msm_iommu_hw-8xxx.h"
    58          ret = clk_enable(iommu->pclk);
    62          if (iommu->clk) {
    63                  ret = clk_enable(iommu->clk);
    65                          clk_disable(iommu->pclk);
    73          if (iommu->clk)
    74                  clk_disable(iommu->clk);
  [all …]
/linux/drivers/gpu/drm/msm/msm_gpu.h

     1  /* SPDX-License-Identifier: GPL-2.0-only */
    10  #include <linux/adreno-smmu-priv.h>
    49          int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
    51          int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
   112          int asid;
   116   * struct msm_gpu_devfreq - devfreq related state
   159   * Used to delay clamping to idle freq on active->idle transition.
   210   * TODO move to per-ring locking where feasible (ie. submit/retire
   263          /* work for handling active-list retiring: */
   284   * switch-over happened early enough in mesa a6xx bringup that we
  [all …]