/linux/drivers/gpu/drm/nouveau/nvif/

mmu.c:
    #include <nvif/mmu.h>

    nvif_mmu_dtor(struct nvif_mmu *mmu)
    if (!nvif_object_constructed(&mmu->object))
    kfree(mmu->kind);
    kfree(mmu->type);
    kfree(mmu->heap);
    nvif_object_dtor(&mmu->object);

    struct nvif_mmu *mmu)                       /* nvif_mmu_ctor() */
    { NVIF_CLASS_MEM_GF100, -1 },
    { NVIF_CLASS_MEM_NV50 , -1 },
    [all …]
mem.c:
    nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size,
    int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type,
    ret = nvif_object_map(&mem->object, NULL, 0);

    nvif_object_dtor(&mem->object);             /* nvif_mem_dtor() */

    nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,
                       int type, u8 page, u64 size, void *argv, u32 argc,
    mem->object.client = NULL;
    if (type < 0)
        return -EINVAL;
    return -ENOMEM;
    [all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

base.c:
    nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
    const int slot = pt->base >> pt->ptp->shift;
    struct nvkm_mmu_ptp *ptp = pt->ptp;
    if (!ptp->free)
        list_add(&ptp->head, &mmu->ptp.list);
    ptp->free |= BIT(slot);
    /* If there's no more sub-allocations, destroy PTP. */
    if (ptp->free == ptp->mask) {
        nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
        list_del(&ptp->head);
    [all …]
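Aside: the nvkm_mmu_ptp_put() hit shows nouveau's page-table-page sub-allocator — slots carved out of one backing allocation are tracked in a free bitmap, and the backing page is released once free == mask. Below is a minimal userspace sketch of that pattern; all names (ptp_sketch, ptp_get, ptp_put) are hypothetical, not nouveau API.

    /* Sketch of bitmap sub-allocation: bit N of `free` set means slot N
     * is available; `mask` covers every slot that exists. */
    #include <stdint.h>
    #include <stdio.h>

    struct ptp_sketch {
        uint32_t free;  /* bit N set => slot N is free */
        uint32_t mask;  /* bits for all slots that exist */
    };

    static int ptp_get(struct ptp_sketch *p)
    {
        for (int slot = 0; slot < 32; slot++) {
            if (p->free & (1u << slot)) {
                p->free &= ~(1u << slot); /* mark slot in use */
                return slot;
            }
        }
        return -1; /* no free slot; a caller would allocate a new PTP */
    }

    static void ptp_put(struct ptp_sketch *p, int slot)
    {
        p->free |= 1u << slot;
        if (p->free == p->mask) /* same test as ptp->free == ptp->mask */
            printf("all slots free: release backing page\n");
    }

    int main(void)
    {
        struct ptp_sketch p = { .free = 0xf, .mask = 0xf }; /* 4 slots */
        int s = ptp_get(&p);
        ptp_put(&p, s);
        return 0;
    }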
ummu.c:
    struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;  /* nvkm_ummu_sclass() */
    if (mmu->func->mem.user.oclass) {
        if (index-- == 0) {
            oclass->base = mmu->func->mem.user;
            oclass->ctor = nvkm_umem_new;
    if (mmu->func->vmm.user.oclass) {
        if (index-- == 0) {
            oclass->base = mmu->func->vmm.user;
            oclass->ctor = nvkm_uvmm_new;
    return -EINVAL;
    [all …]
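The nvkm_ummu_sclass() hits use the `if (index-- == 0)` idiom: callers probe index 0, 1, 2, …, each conditionally exposed class consumes one index, and anything past the end returns -EINVAL. A self-contained sketch of the same idiom — sclass_sketch() and the class names are made up:

    #include <stdio.h>

    #define EINVAL 22

    struct oclass { const char *name; };

    /* two optional classes; the flags choose which ones are exposed */
    static int sclass_sketch(int have_mem, int have_vmm, int index,
                             struct oclass *oclass)
    {
        if (have_mem) {
            if (index-- == 0) {     /* this class consumes one index */
                oclass->name = "MEM";
                return 0;
            }
        }
        if (have_vmm) {
            if (index-- == 0) {
                oclass->name = "VMM";
                return 0;
            }
        }
        return -EINVAL;             /* ran past the last class */
    }

    int main(void)
    {
        struct oclass oc;
        /* with only VMM exposed, index 0 names VMM and index 1 fails */
        for (int i = 0; sclass_sketch(0, 1, i, &oc) == 0; i++)
            printf("index %d -> %s\n", i, oc.name);
        return 0;
    }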
umem.c:
    struct nvkm_client *master = client->object.client;  /* nvkm_umem_search() */
    spin_lock(&master->lock);
    list_for_each_entry(umem, &master->umem, head) {
        if (umem->object.object == handle) {
            memory = nvkm_memory_ref(umem->memory);
    spin_unlock(&master->lock);
    memory = nvkm_memory_ref(umem->memory);
    return memory ? memory : ERR_PTR(-ENOENT);
    if (!umem->map)                             /* nvkm_umem_unmap() */
        return -EEXIST;
    [all …]
mem.c:
    struct nvkm_mmu *mmu;
    return nvkm_mem(memory)->target;            /* nvkm_mem_target() */
    if (mem->pages == 1 && mem->mem)            /* nvkm_mem_addr() */
        return mem->dma[0];
    return nvkm_mem(memory)->pages << PAGE_SHIFT;  /* nvkm_mem_size() */
    .memory = &mem->memory,                     /* nvkm_mem_map_dma() */
    .dma = mem->dma,
    if (mem->mem) {                             /* nvkm_mem_dtor() */
        while (mem->pages--) {
            dma_unmap_page(mem->mmu->subdev.device->dev,
    [all …]
vmmgf100.c:
    u64 base = (addr >> 8) | map->type;         /* gf100_vmm_pgt_pte() */
    if (map->ctag && !(map->next & (1ULL << 44))) {
        while (ptes--) {
            data = base | ((map->ctag >> 1) << 44);
            if (!(map->ctag++ & 1))
        base += map->next;
    map->type += ptes * map->ctag;
    while (ptes--) {
        data += map->next;
    if (map->page->shift == PAGE_SHIFT) {       /* gf100_vmm_pgt_dma() */
    [all …]
vmmnv50.c:
    u64 next = addr + map->type, data;          /* nv50_vmm_pgt_pte() */
    map->type += ptes * map->ctag;
    for (log2blk = 7; log2blk >= 0; log2blk--) {
    next += pten * map->next;
    ptes -= pten;
    while (pten--)
    if (map->page->shift == PAGE_SHIFT) {       /* nv50_vmm_pgt_dma() */
        nvkm_kmap(pt->memory);
        while (ptes--) {
            const u64 data = *map->dma++ + map->type;
    [all …]
nv44.c:
    nv44_mmu_init(struct nvkm_mmu *mmu)
    struct nvkm_device *device = mmu->subdev.device;
    struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
    addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19;
    nvkm_wr32(device, 0x100818, mmu->vmm->null);
    .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
    .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
    .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
    nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
    if (device->type == NVKM_DEVICE_AGP ||
    [all …]
nv41.c:
    nv41_mmu_init(struct nvkm_mmu *mmu)
    struct nvkm_device *device = mmu->subdev.device;
    nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr);
    .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
    .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
    .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
    nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
    if (device->type == NVKM_DEVICE_AGP ||
        !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
        return nv04_mmu_new(device, type, inst, pmmu);
    [all …]
/linux/drivers/gpu/drm/msm/

msm_mmu.h:
    /* SPDX-License-Identifier: GPL-2.0-only */
    void (*detach)(struct msm_mmu *mmu);
    int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
    int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
    void (*destroy)(struct msm_mmu *mmu);
    void (*set_stall)(struct msm_mmu *mmu, bool enable);
    enum msm_mmu_type type;
    static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
                                    const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
    mmu->dev = dev;
    [all …]
msm_iommu.c:
    // SPDX-License-Identifier: GPL-2.0-only
    #include <linux/adreno-smmu-priv.h>
    #include <linux/io-pgtable.h>
    static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
    return container_of(mmu, struct msm_iommu_pagetable, base);
    pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);  /* calc_pgsize() */
    pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
    if ((iova ^ paddr) & (pgsize_next - 1))
    offset = pgsize_next - (addr_merge & (pgsize_next - 1));
    static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
    [all …]
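calc_pgsize() narrows a bitmap of supported page sizes by the mapping length and the iova/paddr alignment, then picks the largest remaining size. A hedged userspace re-creation of that selection step; GENMASK64() and fls64_() are local stand-ins for the kernel helpers, and the real msm code goes further (the pgsize_next lookahead for merging), which this sketch omits:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK64(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    static unsigned int fls64_(uint64_t x) /* index of highest set bit */
    {
        unsigned int r = 0;
        while (x >>= 1)
            r++;
        return r;
    }

    static uint64_t calc_pgsize_sketch(uint64_t pgsize_bitmap, uint64_t iova,
                                       uint64_t paddr, uint64_t size)
    {
        /* keep sizes no larger than the requested length ... */
        uint64_t pgsizes = pgsize_bitmap & GENMASK64(fls64_(size), 0);
        /* ... and no larger than the common alignment of both addresses */
        uint64_t addr_merge = iova | paddr;
        if (addr_merge)
            pgsizes &= GENMASK64(fls64_(addr_merge & -addr_merge), 0);
        return pgsizes ? 1ULL << fls64_(pgsizes) : 0;
    }

    int main(void)
    {
        /* 4K/2M/1G bitmap, 2M-aligned addresses, 4M request => 2M pages */
        uint64_t bitmap = (1ULL << 12) | (1ULL << 21) | (1ULL << 30);
        printf("%#llx\n", (unsigned long long)
               calc_pgsize_sketch(bitmap, 0x200000, 0x400000, 0x400000));
        return 0;
    }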
/linux/drivers/gpu/drm/nouveau/

nouveau_mem.c:
    switch (vmm->object.oclass) {               /* nouveau_mem_map() */
    args.nv50.kind = mem->kind;
    args.nv50.comp = mem->comp;
    if (mem->mem.type & NVIF_MEM_VRAM)
    args.gf100.kind = mem->kind;
    return -ENOSYS;
    return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
    nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[1]);  /* nouveau_mem_fini() */
    nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[0]);
    mutex_lock(&mem->drm->client_mutex);
    [all …]
/linux/arch/riscv/boot/dts/sophgo/

sg2042-cpus.dtsi:
    // SPDX-License-Identifier: (GPL-2.0 OR MIT)
    #address-cells = <1>;
    #size-cells = <0>;
    timebase-frequency = <50000000>;
    cpu-map {
    riscv,isa-base = "rv64i";
    riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
    i-cache-block-size = <64>;
    i-cache-size = <65536>;
    i-cache-sets = <512>;
    [all …]
sg2044-cpus.dtsi:
    // SPDX-License-Identifier: (GPL-2.0 OR MIT)
    #address-cells = <2>;
    #size-cells = <2>;
    #address-cells = <1>;
    #size-cells = <0>;
    timebase-frequency = <50000000>;
    i-cache-block-size = <64>;
    i-cache-size = <65536>;
    i-cache-sets = <512>;
    d-cache-block-size = <64>;
    [all …]
/linux/drivers/gpu/drm/nouveau/include/nvif/

mmu.h:
    u8 type;
    } *type;

    nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
    if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv)

    nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
    for (i = 0; i < mmu->type_nr; i++) {
        if ((mmu->type[i].type & mask) == mask)
    return -EINVAL;
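nvif_mmu_type() above is a linear search for the first memory type whose property mask contains every requested bit. A simplified, self-contained illustration of how such a helper is used; the MEM_* flags and struct layout are stand-ins, not the real nvif definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define MEM_VRAM     0x01
    #define MEM_HOST     0x02
    #define MEM_MAPPABLE 0x04

    struct mmu_sketch {
        int type_nr;
        struct { uint8_t type; } type[4]; /* per-type property mask */
    };

    static int mmu_type_sketch(const struct mmu_sketch *mmu, uint8_t mask)
    {
        for (int i = 0; i < mmu->type_nr; i++) {
            if ((mmu->type[i].type & mask) == mask)
                return i; /* first type with all requested properties */
        }
        return -1;
    }

    int main(void)
    {
        struct mmu_sketch mmu = {
            .type_nr = 3,
            .type = { { MEM_VRAM }, { MEM_HOST },
                      { MEM_HOST | MEM_MAPPABLE } },
        };
        /* ask for host memory the CPU can map => index 2 */
        printf("%d\n", mmu_type_sketch(&mmu, MEM_HOST | MEM_MAPPABLE));
        return 0;
    }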
/linux/arch/m68k/include/asm/

sun3mmu.h:
    /* SPDX-License-Identifier: GPL-2.0 */
     * Definitions for Sun3 custom MMU.
    #include <asm/sun3-head.h>
    /* MMU characteristics. */
    #define SUN3_PMEG_MASK (SUN3_PMEG_SIZE - 1)
    #define SUN3_PTE_MASK (SUN3_PTE_SIZE - 1)
    #define AC_CONTEXT    0x30000000   /* 34c current mmu-context */
    #define AC_SYNC_ERR   0x60000000   /* c fault type */
    #define AC_ASYNC_ERR  0x60000008   /* c asynchronous fault type */
    #define AC_VME_VECTOR 0xE0000000   /* 4 For non-Autovector VME, byte */
    [all …]
/linux/arch/arm64/kvm/

nested.c:
    // SPDX-License-Identifier: GPL-2.0-only
     * Copyright (C) 2017 - Columbia University and Linaro Ltd.
    /* -1 when not mapped on a CPU */
     * Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
    kvm->arch.nested_mmus = NULL;               /* kvm_init_nested() */
    kvm->arch.nested_mmus_size = 0;
    atomic_set(&kvm->arch.vncr_map_count, 0);
    static int init_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
     * We only initialise the IPA range on the canonical MMU, which
    return kvm_init_stage2_mmu(kvm, mmu, kvm_get_pa_bits(kvm));
    [all …]
mmu.c:
    // SPDX-License-Identifier: GPL-2.0-only
     * Copyright (C) 2012 - Virtual Open Systems and Columbia University
    return (boundary - 1 < end - 1) ? boundary : end;  /* __stage2_range_addr_end() */
    static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr,
    struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
    struct kvm_pgtable *pgt = mmu->pgt;
    return -EINVAL;
    ret = fn(pgt, addr, next - addr);
    cond_resched_rwlock_write(&kvm->mmu_lock);
    #define stage2_apply_range_resched(mmu, addr, end, fn) \
    [all …]
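stage2_apply_range() walks a guest-physical range in bounded chunks so the MMU lock can be dropped between chunks, and __stage2_range_addr_end() clamps each chunk with the overflow-safe `(boundary - 1 < end - 1)` comparison visible above. A userspace sketch of that pattern, with the locking elided:

    #include <stdint.h>
    #include <stdio.h>

    typedef int (*range_fn)(uint64_t addr, uint64_t size);

    static uint64_t chunk_end(uint64_t addr, uint64_t end, uint64_t chunk)
    {
        uint64_t boundary = (addr + chunk) & ~(chunk - 1);
        /* compare on (x - 1) so an end of 0 meaning "wrap" stays safe */
        return (boundary - 1 < end - 1) ? boundary : end;
    }

    static int apply_range(uint64_t addr, uint64_t end, range_fn fn)
    {
        while (addr < end) {
            uint64_t next = chunk_end(addr, end, 1 << 21); /* 2M chunks */
            int ret = fn(addr, next - addr);
            if (ret)
                return ret;
            addr = next;
            /* a kernel caller would cond_resched()/relock here */
        }
        return 0;
    }

    static int show(uint64_t addr, uint64_t size)
    {
        printf("apply %#llx +%#llx\n",
               (unsigned long long)addr, (unsigned long long)size);
        return 0;
    }

    int main(void)
    {
        return apply_range(0x1ff000, 0x601000, show);
    }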
/linux/arch/m68k/include/uapi/asm/

bootinfo.h:
    /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
     * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure
    #define BI_MACHTYPE  0x0001  /* machine type (__be32) */
    #define BI_CPUTYPE   0x0002  /* cpu type (__be32) */
    #define BI_FPUTYPE   0x0003  /* fpu type (__be32) */
    #define BI_MMUTYPE   0x0004  /* mmu type (__be32) */
     * - length [ 2 bytes, 16-bit big endian ]
     * - seed data [ `length` bytes, padded to preserve 4-byte struct alignment ]
     * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE)
    #define FPUB_SUNFPA  4  /* Sun-3 FPA */
    [all …]
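The BI_* tags above identify records in the m68k bootinfo stream. A sketch of walking that stream, assuming the struct bi_record layout from the full header (big-endian 16-bit tag, big-endian 16-bit total record size, then data) — the byte stream below is fabricated for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define BI_LAST    0x0000
    #define BI_MMUTYPE 0x0004

    static uint16_t be16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }
    static uint32_t be32(const uint8_t *p)
    {
        return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
               (uint32_t)p[2] << 8 | p[3];
    }

    int main(void)
    {
        /* fabricated record stream: one BI_MMUTYPE record, then BI_LAST */
        const uint8_t bi[] = {
            0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, /* mmu type 0 */
            0x00, 0x00, 0x00, 0x04,                         /* BI_LAST */
        };
        const uint8_t *p = bi;
        for (;;) {
            uint16_t tag = be16(p), size = be16(p + 2);
            if (tag == BI_LAST)
                break;
            if (tag == BI_MMUTYPE)
                printf("mmu type: %u\n", be32(p + 4));
            p += size; /* size covers the whole record, 4-byte aligned */
        }
        return 0;
    }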
/linux/Documentation/arch/xtensa/

booting.rst:
    tag value constants. First entry in the list must have type BP_TAG_FIRST, last
    entry must have type BP_TAG_LAST. The address of the first list entry is
    passed to the kernel in the register a2. The address type depends on MMU type:

    - For configurations without MMU, with region protection or with MPU the
    - For configurations with region translation MMU or with MMUv3 and CONFIG_MMU=n
    - For configurations with MMUv2 the address must be a virtual address in the
    - For configurations with MMUv3 and CONFIG_MMU=y the address may be either a
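A sketch of walking the tag list this text describes, assuming xtensa's bp_tag header layout (16-bit id, 16-bit payload size) and the BP_TAG_FIRST/BP_TAG_LAST values from arch/xtensa/include/asm/bootparam.h; in a real boot the list pointer arrives in register a2, not as a C argument:

    #include <stdint.h>
    #include <stdio.h>

    #define BP_TAG_FIRST 0x7B0B
    #define BP_TAG_LAST  0x7E0B

    struct bp_tag {
        uint16_t id;
        uint16_t size;  /* bytes of payload following the header */
        /* payload follows */
    };

    static void walk_bp_list(const uint8_t *p)
    {
        for (;;) {
            const struct bp_tag *t = (const struct bp_tag *)p;
            printf("tag %#06x, %u payload bytes\n", t->id, t->size);
            if (t->id == BP_TAG_LAST)
                break;
            p += sizeof(*t) + t->size; /* tags are packed back to back */
        }
    }

    int main(void)
    {
        /* minimal fabricated list: FIRST (no payload) then LAST */
        uint16_t list[] = {
            BP_TAG_FIRST, 0,  /* id, payload size */
            BP_TAG_LAST,  0,
        };
        walk_bp_list((const uint8_t *)list);
        return 0;
    }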
/linux/arch/riscv/

Kconfig:
    # SPDX-License-Identifier: GPL-2.0-only
    # see Documentation/kbuild/kconfig-language.rst.
    select ARCH_HAS_DEBUG_VIRTUAL if MMU
    select ARCH_HAS_PTDUMP if MMU
    select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU
    select ARCH_HAS_SET_DIRECT_MAP if MMU
    select ARCH_HAS_SET_MEMORY if MMU
    select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
    select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
    select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if 64BIT && MMU
    [all …]
/linux/arch/microblaze/kernel/

misc.S:
     * Miscellaneous low-level MMU functions.
     * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
     * Copyright (C) 2008-2009 PetaLogix
    #include <asm/mmu.h>
     * Flush MMU TLB
    .type _tlbia, @function
    rsubi r11, r12, MICROBLAZE_TLB_SIZE - 1
    .size _tlbia, . - _tlbia
     * Flush MMU TLB for a particular address (in r5)
    .type _tlbie, @function
    [all …]
/linux/arch/arm64/kvm/hyp/nvhe/

tlb.c:
    // SPDX-License-Identifier: GPL-2.0-only
     * Copyright (C) 2015 - ARM Ltd
    struct kvm_s2_mmu *mmu;
    static void enter_vmid_context(struct kvm_s2_mmu *mmu,
    struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
    host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
    vcpu = host_ctxt->__hyp_running_vcpu;
    cxt->mmu = NULL;
     * - ensure that the page table updates are visible to all
     *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
    [all …]
/linux/arch/x86/kvm/mmu/

spte.h:
    // SPDX-License-Identifier: GPL-2.0-only
    #include "mmu.h"
     * A MMU present SPTE is backed by actual memory and may or may not be present
     * better code than for a high bit, e.g. 56+. MMU present checks are pervasive
     * be restricted to using write-protection (for L2 when CPU dirty logging, i.e.
     * PML, is enabled). Use bits 52 and 53 to hold the type of A/D tracking that
     * is guaranteed to have A/D bits and write-protection is forced only for
     * TDP with CPU dirty logging (PML). If NPT ever gains PML-like support, it
     * must be restricted to 64-bit KVM.
    #define SPTE_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
    [all …]
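SPTE_BASE_ADDR_MASK masks the page-frame bits out of an SPTE, leaving software bits above and permission bits below. A toy illustration of that masking, assuming 52 physical address bits in place of the kernel's runtime physical_mask:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)
    /* assumption: 52 physical address bits, as on modern x86 */
    #define PHYSICAL_MASK ((1ULL << 52) - 1)
    #define SPTE_BASE_ADDR_MASK (PHYSICAL_MASK & ~(uint64_t)(PAGE_SIZE - 1))

    int main(void)
    {
        /* bits above 51 and below 12 stand in for flag/software bits */
        uint64_t spte = 0x8000000123456877ULL;
        printf("pfn %#llx\n", (unsigned long long)
               ((spte & SPTE_BASE_ADDR_MASK) >> PAGE_SHIFT));
        return 0;
    }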
|