
Searched +full:mmu +full:- +full:type (Results 1 – 25 of 445) sorted by relevance


/linux/drivers/gpu/drm/nouveau/nvif/
mmu.c
22 #include <nvif/mmu.h>
28 nvif_mmu_dtor(struct nvif_mmu *mmu) in nvif_mmu_dtor() argument
30 if (!nvif_object_constructed(&mmu->object)) in nvif_mmu_dtor()
33 kfree(mmu->kind); in nvif_mmu_dtor()
34 kfree(mmu->type); in nvif_mmu_dtor()
35 kfree(mmu->heap); in nvif_mmu_dtor()
36 nvif_object_dtor(&mmu->object); in nvif_mmu_dtor()
41 struct nvif_mmu *mmu) in nvif_mmu_ctor() argument
44 { NVIF_CLASS_MEM_GF100, -1 }, in nvif_mmu_ctor()
45 { NVIF_CLASS_MEM_NV50 , -1 }, in nvif_mmu_ctor()
[all …]
mem.c
28 nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size, in nvif_mem_ctor_map() argument
31 int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type, in nvif_mem_ctor_map()
34 ret = nvif_object_map(&mem->object, NULL, 0); in nvif_mem_ctor_map()
44 nvif_object_dtor(&mem->object); in nvif_mem_dtor()
48 nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass, in nvif_mem_ctor_type() argument
49 int type, u8 page, u64 size, void *argv, u32 argc, in nvif_mem_ctor_type() argument
56 mem->object.client = NULL; in nvif_mem_ctor_type()
57 if (type < 0) in nvif_mem_ctor_type()
58 return -EINVAL; in nvif_mem_ctor_type()
62 return -ENOMEM; in nvif_mem_ctor_type()
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
42 nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt) in nvkm_mmu_ptp_put() argument
44 const int slot = pt->base >> pt->ptp->shift; in nvkm_mmu_ptp_put()
45 struct nvkm_mmu_ptp *ptp = pt->ptp; in nvkm_mmu_ptp_put()
50 if (!ptp->free) in nvkm_mmu_ptp_put()
51 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_put()
52 ptp->free |= BIT(slot); in nvkm_mmu_ptp_put()
54 /* If there are no more sub-allocations, destroy PTP. */ in nvkm_mmu_ptp_put()
55 if (ptp->free == ptp->mask) { in nvkm_mmu_ptp_put()
56 nvkm_mmu_ptc_put(mmu, force, &ptp->pt); in nvkm_mmu_ptp_put()
57 list_del(&ptp->head); in nvkm_mmu_ptp_put()
[all …]
ummu.c
35 struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
37 if (mmu->func->mem.user.oclass) { in nvkm_ummu_sclass()
38 if (index-- == 0) { in nvkm_ummu_sclass()
39 oclass->base = mmu->func->mem.user; in nvkm_ummu_sclass()
40 oclass->ctor = nvkm_umem_new; in nvkm_ummu_sclass()
45 if (mmu->func->vmm.user.oclass) { in nvkm_ummu_sclass()
46 if (index-- == 0) { in nvkm_ummu_sclass()
47 oclass->base = mmu->func->vmm.user; in nvkm_ummu_sclass()
48 oclass->ctor = nvkm_uvmm_new; in nvkm_ummu_sclass()
53 return -EINVAL; in nvkm_ummu_sclass()
[all …]
umem.c
37 struct nvkm_client *master = client->object.client; in nvkm_umem_search()
45 spin_lock(&master->lock); in nvkm_umem_search()
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
52 spin_unlock(&master->lock); in nvkm_umem_search()
56 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
59 return memory ? memory : ERR_PTR(-ENOENT); in nvkm_umem_search()
67 if (!umem->map) in nvkm_umem_unmap()
68 return -EEXIST; in nvkm_umem_unmap()
[all …]
mem.c
33 struct nvkm_mmu *mmu; member
45 return nvkm_mem(memory)->target; in nvkm_mem_target()
58 if (mem->pages == 1 && mem->mem) in nvkm_mem_addr()
59 return mem->dma[0]; in nvkm_mem_addr()
66 return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
75 .memory = &mem->memory, in nvkm_mem_map_dma()
77 .dma = mem->dma, in nvkm_mem_map_dma()
86 if (mem->mem) { in nvkm_mem_dtor()
87 while (mem->pages--) { in nvkm_mem_dtor()
88 dma_unmap_page(mem->mmu->subdev.device->dev, in nvkm_mem_dtor()
[all …]
vmmgf100.c
35 u64 base = (addr >> 8) | map->type; in gf100_vmm_pgt_pte()
38 if (map->ctag && !(map->next & (1ULL << 44))) { in gf100_vmm_pgt_pte()
39 while (ptes--) { in gf100_vmm_pgt_pte()
40 data = base | ((map->ctag >> 1) << 44); in gf100_vmm_pgt_pte()
41 if (!(map->ctag++ & 1)) in gf100_vmm_pgt_pte()
45 base += map->next; in gf100_vmm_pgt_pte()
48 map->type += ptes * map->ctag; in gf100_vmm_pgt_pte()
50 while (ptes--) { in gf100_vmm_pgt_pte()
52 data += map->next; in gf100_vmm_pgt_pte()
68 if (map->page->shift == PAGE_SHIFT) { in gf100_vmm_pgt_dma()
[all …]
memnv04.c
31 nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, in nv04_mem_map() argument
37 struct nvkm_device *device = mmu->subdev.device; in nv04_mem_map()
39 int ret = -ENOSYS; in nv04_mem_map()
41 if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) in nv04_mem_map()
44 *paddr = device->func->resource_addr(device, 1) + addr; in nv04_mem_map()
46 *pvma = ERR_PTR(-ENODEV); in nv04_mem_map()
51 nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in nv04_mem_new() argument
57 int ret = -ENOSYS; in nv04_mem_new()
59 if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) in nv04_mem_new()
62 if (mmu->type[type].type & NVKM_MEM_MAPPABLE) in nv04_mem_new()
[all …]
vmmnv50.c
35 u64 next = addr + map->type, data; in nv50_vmm_pgt_pte()
39 map->type += ptes * map->ctag; in nv50_vmm_pgt_pte()
42 for (log2blk = 7; log2blk >= 0; log2blk--) { in nv50_vmm_pgt_pte()
49 next += pten * map->next; in nv50_vmm_pgt_pte()
50 ptes -= pten; in nv50_vmm_pgt_pte()
52 while (pten--) in nv50_vmm_pgt_pte()
68 if (map->page->shift == PAGE_SHIFT) { in nv50_vmm_pgt_dma()
70 nvkm_kmap(pt->memory); in nv50_vmm_pgt_dma()
71 while (ptes--) { in nv50_vmm_pgt_dma()
72 const u64 data = *map->dma++ + map->type; in nv50_vmm_pgt_dma()
[all …]
vmmgp100.c
37 struct device *dev = vmm->mmu->subdev.device->dev; in gp100_vmm_pfn_unmap()
40 nvkm_kmap(pt->memory); in gp100_vmm_pfn_unmap()
41 while (ptes--) { in gp100_vmm_pfn_unmap()
42 u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0); in gp100_vmm_pfn_unmap()
43 u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4); in gp100_vmm_pfn_unmap()
51 nvkm_done(pt->memory); in gp100_vmm_pfn_unmap()
59 nvkm_kmap(pt->memory); in gp100_vmm_pfn_clear()
60 while (ptes--) { in gp100_vmm_pfn_clear()
61 u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0); in gp100_vmm_pfn_clear()
62 u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4); in gp100_vmm_pfn_clear()
[all …]
memgf100.c
34 gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, in gf100_mem_map() argument
42 struct nvkm_device *device = mmu->subdev.device; in gf100_mem_map()
44 int ret = -ENOSYS; in gf100_mem_map()
46 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { in gf100_mem_map()
47 uvmm.ro = args->v0.ro; in gf100_mem_map()
48 uvmm.kind = args->v0.kind; in gf100_mem_map()
50 if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) { in gf100_mem_map()
63 *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; in gf100_mem_map()
64 *psize = (*pvma)->size; in gf100_mem_map()
69 gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in gf100_mem_new() argument
[all …]
nv44.c
32 nv44_mmu_init(struct nvkm_mmu *mmu) in nv44_mmu_init() argument
34 struct nvkm_device *device = mmu->subdev.device; in nv44_mmu_init()
35 struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory; in nv44_mmu_init()
43 addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19; in nv44_mmu_init()
46 nvkm_wr32(device, 0x100818, mmu->vmm->null); in nv44_mmu_init()
59 .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
60 .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
61 .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
65 nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, in nv44_mmu_new() argument
68 if (device->type == NVKM_DEVICE_AGP || in nv44_mmu_new()
[all …]
vmmtu102.c
29 struct nvkm_device *device = vmm->mmu->subdev.device; in tu102_vmm_flush()
30 u32 type = 0; in tu102_vmm_flush() local
32 type |= 0x00000001; /* PAGE_ALL */ in tu102_vmm_flush()
33 if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) in tu102_vmm_flush()
34 type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */ in tu102_vmm_flush()
36 mutex_lock(&vmm->mmu->mutex); in tu102_vmm_flush()
38 if (!vmm->rm.bar2_pdb) in tu102_vmm_flush()
39 nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8); in tu102_vmm_flush()
41 nvkm_wr32(device, 0xb830a0, vmm->rm.bar2_pdb >> 8); in tu102_vmm_flush()
43 nvkm_wr32(device, 0xb830b0, 0x80000000 | type); in tu102_vmm_flush()
[all …]
nv41.c
32 nv41_mmu_init(struct nvkm_mmu *mmu) in nv41_mmu_init() argument
34 struct nvkm_device *device = mmu->subdev.device; in nv41_mmu_init()
35 nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr); in nv41_mmu_init()
44 .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
45 .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
46 .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
50 nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, in nv41_mmu_new() argument
53 if (device->type == NVKM_DEVICE_AGP || in nv41_mmu_new()
54 !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) in nv41_mmu_new()
55 return nv04_mmu_new(device, type, inst, pmmu); in nv41_mmu_new()
[all …]
/linux/drivers/gpu/drm/msm/
msm_mmu.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
13 void (*detach)(struct msm_mmu *mmu);
14 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
16 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
17 void (*destroy)(struct msm_mmu *mmu);
18 void (*resume_translation)(struct msm_mmu *mmu);
32 enum msm_mmu_type type; member
35 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, in msm_mmu_init() argument
36 const struct msm_mmu_funcs *funcs, enum msm_mmu_type type) in msm_mmu_init() argument
38 mmu->dev = dev; in msm_mmu_init()
[all …]
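
The msm_mmu.h hit above shows the ops-table pattern these MMU backends share: a struct of function pointers (map, unmap, destroy, …) bound to a device and an enum type by a small init helper. The stand-alone C sketch below mirrors that shape for illustration only; the demo_* names are hypothetical and are not part of the msm driver API.

/* Hypothetical, self-contained analogue of the msm_mmu ops-table pattern. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_mmu;

struct demo_mmu_ops {
	int  (*map)(struct demo_mmu *mmu, uint64_t iova, size_t len);
	int  (*unmap)(struct demo_mmu *mmu, uint64_t iova, size_t len);
	void (*destroy)(struct demo_mmu *mmu);
};

struct demo_mmu {
	const struct demo_mmu_ops *ops; /* backend callbacks */
	int type;                       /* plays the role of enum msm_mmu_type */
};

/* Mirrors the shape of msm_mmu_init(): bind the ops table and type. */
static void demo_mmu_init(struct demo_mmu *mmu,
			  const struct demo_mmu_ops *ops, int type)
{
	mmu->ops = ops;
	mmu->type = type;
}

static int demo_map(struct demo_mmu *mmu, uint64_t iova, size_t len)
{
	printf("map %zu bytes at 0x%llx (type %d)\n",
	       len, (unsigned long long)iova, mmu->type);
	return 0;
}

static int demo_unmap(struct demo_mmu *mmu, uint64_t iova, size_t len)
{
	(void)mmu; (void)iova; (void)len;
	return 0;
}

static void demo_destroy(struct demo_mmu *mmu) { (void)mmu; }

static const struct demo_mmu_ops demo_ops = {
	.map     = demo_map,
	.unmap   = demo_unmap,
	.destroy = demo_destroy,
};

int main(void)
{
	struct demo_mmu mmu;

	demo_mmu_init(&mmu, &demo_ops, 1);
	return mmu.ops->map(&mmu, 0x1000, 4096);
}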
/linux/drivers/gpu/drm/nouveau/
nouveau_mem.c
46 switch (vmm->object.oclass) { in nouveau_mem_map()
53 args.nv50.kind = mem->kind; in nouveau_mem_map()
54 args.nv50.comp = mem->comp; in nouveau_mem_map()
61 if (mem->mem.type & NVIF_MEM_VRAM) in nouveau_mem_map()
67 args.gf100.kind = mem->kind; in nouveau_mem_map()
72 return -ENOSYS; in nouveau_mem_map()
75 return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0); in nouveau_mem_map()
81 nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[1]); in nouveau_mem_fini()
82 nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[0]); in nouveau_mem_fini()
83 mutex_lock(&mem->drm->client_mutex); in nouveau_mem_fini()
[all …]
/linux/arch/riscv/boot/dts/sophgo/
sg2042-cpus.dtsi
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
8 #address-cells = <1>;
9 #size-cells = <0>;
10 timebase-frequency = <50000000>;
12 cpu-map {
260 riscv,isa-base = "rv64i";
261 riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
265 i-cache-block-size = <64>;
266 i-cache-size = <65536>;
267 i-cache-sets = <512>;
[all …]
/linux/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
27 u8 type; member
29 } *type; member
39 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) in nvif_mmu_kind_valid() argument
42 if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) in nvif_mmu_kind_valid()
49 nvif_mmu_type(struct nvif_mmu *mmu, u8 mask) in nvif_mmu_type() argument
52 for (i = 0; i < mmu->type_nr; i++) { in nvif_mmu_type()
53 if ((mmu->type[i].type & mask) == mask) in nvif_mmu_type()
56 return -EINVAL; in nvif_mmu_type()
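
nvif_mmu_type() above picks the first memory type whose flag bits cover a requested mask, returning its index or -EINVAL. The sketch below reproduces just that selection logic in stand-alone C; the DEMO_MEM_* names and flag values are placeholders, not the real NVIF_MEM_* definitions.

/* Hypothetical analogue of nvif_mmu_type()'s mask matching. */
#include <stdio.h>

#define DEMO_MEM_VRAM     0x01 /* placeholder bits, not the NVIF values */
#define DEMO_MEM_HOST     0x02
#define DEMO_MEM_MAPPABLE 0x04

struct demo_mem_type {
	unsigned char type; /* capability flags for this memory type */
};

static const struct demo_mem_type demo_types[] = {
	{ DEMO_MEM_VRAM },
	{ DEMO_MEM_VRAM | DEMO_MEM_MAPPABLE },
	{ DEMO_MEM_HOST | DEMO_MEM_MAPPABLE },
};

/* Return the index of the first type whose flags cover 'mask', or -1. */
static int demo_mmu_type(unsigned char mask)
{
	for (int i = 0; i < (int)(sizeof(demo_types) / sizeof(demo_types[0])); i++) {
		if ((demo_types[i].type & mask) == mask)
			return i;
	}
	return -1;
}

int main(void)
{
	printf("mappable VRAM -> index %d\n",
	       demo_mmu_type(DEMO_MEM_VRAM | DEMO_MEM_MAPPABLE));
	return 0;
}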
/linux/arch/arm/mm/
Kconfig
1 # SPDX-License-Identifier: GPL-2.0
2 comment "Processor Type"
11 depends on !MMU
17 A 32-bit RISC microprocessor based on the ARM7 processor core
30 select CPU_COPY_V4WT if MMU
34 select CPU_TLB_V4WT if MMU
36 A 32-bit RISC processor with 8kByte Cache, Write Buffer and
37 MMU built around an ARM7TDMI core.
45 depends on !MMU
53 A 32-bit RISC processor with 8KB cache or 4KB variants,
[all …]
/linux/arch/m68k/include/asm/
sun3mmu.h
1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Definitions for Sun3 custom MMU.
10 #include <asm/sun3-head.h>
12 /* MMU characteristics. */
19 #define SUN3_PMEG_MASK (SUN3_PMEG_SIZE - 1)
23 #define SUN3_PTE_MASK (SUN3_PTE_SIZE - 1)
32 #define AC_CONTEXT 0x30000000 /* 34c current mmu-context */
36 #define AC_SYNC_ERR 0x60000000 /* c fault type */
38 #define AC_ASYNC_ERR 0x60000008 /* c asynchronous fault type */
44 #define AC_VME_VECTOR 0xE0000000 /* 4 For non-Autovector VME, byte */
[all …]
/linux/drivers/gpu/drm/panfrost/
panfrost_mmu.c
1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/dma-mapping.h>
13 #include <linux/io-pgtable.h>
26 #define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
27 #define mmu_read(dev, reg) readl(dev->iomem + reg)
34 /* Wait for the MMU status to indicate there is no active command, in in wait_ready()
36 ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr), in wait_ready()
42 dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n"); in wait_ready()
52 /* write AS_COMMAND when MMU is ready to accept another command */ in write_cmd()
78 region_width = max(fls64(region_start ^ (region_end - 1)), in lock_region()
[all …]
/linux/arch/m68k/include/uapi/asm/
bootinfo.h
1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
3 * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure
57 #define BI_MACHTYPE 0x0001 /* machine type (__be32) */
58 #define BI_CPUTYPE 0x0002 /* cpu type (__be32) */
59 #define BI_FPUTYPE 0x0003 /* fpu type (__be32) */
60 #define BI_MMUTYPE 0x0004 /* mmu type (__be32) */
70 * - length [ 2 bytes, 16-bit big endian ]
71 * - seed data [ `length` bytes, padded to preserve 4-byte struct alignment ]
97 * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE)
123 #define FPUB_SUNFPA 4 /* Sun-3 FPA */
[all …]
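
The bootinfo.h comment above describes a record as a 2-byte big-endian length followed by `length` data bytes, padded to preserve 4-byte struct alignment. One plausible way to compute how far such a record extends is sketched below; the helper name and the exact padding rule are assumptions for illustration, not the kernel's bootinfo parser.

/* Hypothetical sketch: span of a record with a 16-bit big-endian length
 * and data padded so the next record stays 4-byte aligned. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t demo_record_span(const uint8_t *rec)
{
	uint16_t len = (uint16_t)((rec[0] << 8) | rec[1]); /* 16-bit big endian */
	size_t span = 2 + (size_t)len;                     /* length field + data */

	return (span + 3) & ~(size_t)3;                    /* round up to 4 bytes */
}

int main(void)
{
	const uint8_t rec[8] = { 0x00, 0x03, 0xaa, 0xbb, 0xcc }; /* length = 3 */

	printf("record spans %zu bytes after padding\n", demo_record_span(rec)); /* 8 */
	return 0;
}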
/linux/arch/xtensa/
Kconfig
1 # SPDX-License-Identifier: GPL-2.0
6 select ARCH_HAS_BINFMT_FLAT if !MMU
9 select ARCH_HAS_DMA_PREP_COHERENT if MMU
12 select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
13 select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
14 select ARCH_HAS_DMA_SET_UNCACHED if MMU
25 select DMA_NONCOHERENT_MMAP if MMU
33 select GENERIC_IOREMAP if MMU
36 select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
61 Xtensa processors are 32-bit RISC machines designed by Tensilica
[all …]
/linux/drivers/gpu/drm/imagination/
pvr_mmu.c
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
17 #include <linux/dma-mapping.h>
23 #define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))
29 * size and sets up values needed by the MMU code below.
61 (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K)))
64 PVR_MMU_SYNC_LEVEL_NONE = -1,
77 * pvr_mmu_set_flush_flags() - Set MMU cache flush flags for next call to
80 * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS.
82 * This function must be called following any possible change to the MMU page
87 atomic_fetch_or(flags, &pvr_dev->mmu_flush_cache_flags); in pvr_mmu_set_flush_flags()
[all …]
/linux/Documentation/arch/xtensa/
booting.rst
7 tag value constants. First entry in the list must have type BP_TAG_FIRST, last
8 entry must have type BP_TAG_LAST. The address of the first list entry is
9 passed to the kernel in the register a2. The address type depends on MMU type:
11 - For configurations without MMU, with region protection or with MPU the
13 - For configurations with region translation MMU or with MMUv3 and CONFIG_MMU=n
16 - For configurations with MMUv2 the address must be a virtual address in the
18 - For configurations with MMUv3 and CONFIG_MMU=y the address may be either a
