/*
 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Vivante GPU MMU version 1 support: a single flat page table with one
 * 32-bit entry per 4 KiB page, programmed into the memory controller (MC)
 * registers of each GPU engine.
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state_hi.xml.h"

/*
 * 2 MiB of u32 entries => PT_ENTRIES (512K) entries, each mapping one
 * 4 KiB page, i.e. a 2 GiB GPU virtual aperture.
 */
#define PT_SIZE		SZ_2M
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

/* Base of the GPU virtual address window covered by the page table. */
#define GPU_MEM_START	0x80000000

struct etnaviv_iommuv1_domain {
	struct etnaviv_iommu_domain base;
	/* CPU view and DMA address of the flat page table. */
	u32 *pgtable_cpu;
	dma_addr_t pgtable_dma;
};

/* Downcast the generic domain to the v1-specific container. */
static struct etnaviv_iommuv1_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv1_domain, base);
}

/*
 * Allocate the scratch "bad page" and the flat page table, then point
 * every page-table entry at the bad page so that any GPU access to an
 * unmapped address hits a recognizable poison pattern instead of random
 * memory.
 *
 * Returns 0 on success, -ENOMEM if either DMA allocation fails (the bad
 * page is released again on page-table allocation failure).
 */
static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
{
	u32 *p;
	int i;

	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
						etnaviv_domain->base.dev,
						SZ_4K,
						&etnaviv_domain->base.bad_page_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu)
		return -ENOMEM;

	/* Fill the bad page with a poison marker, one u32 at a time. */
	p = etnaviv_domain->base.bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	etnaviv_domain->pgtable_cpu =
			dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
					   &etnaviv_domain->pgtable_dma,
					   GFP_KERNEL);
	if (!etnaviv_domain->pgtable_cpu) {
		/* Unwind the bad-page allocation made above. */
		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
				  etnaviv_domain->base.bad_page_cpu,
				  etnaviv_domain->base.bad_page_dma);
		return -ENOMEM;
	}

	/* Initially map every entry to the bad page's DMA address. */
	memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
		 PT_ENTRIES);

	return 0;
}

/* Free the page table, the bad page, and the domain structure itself. */
static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(domain);

	dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
			  etnaviv_domain->pgtable_cpu,
			  etnaviv_domain->pgtable_dma);

	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
			  etnaviv_domain->base.bad_page_cpu,
			  etnaviv_domain->base.bad_page_dma);

	kfree(etnaviv_domain);
}

/*
 * Map one 4 KiB page: write @paddr into the page-table slot for @iova.
 * Only SZ_4K-sized mappings are supported; @prot is ignored (the v1 MMU
 * has no per-page protection bits visible here).
 *
 * Returns 0 on success, -EINVAL if @size is not SZ_4K.
 */
static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
	/* iova is an offset into the aperture starting at GPU_MEM_START. */
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	etnaviv_domain->pgtable_cpu[index] = paddr;

	return 0;
}

/*
 * Unmap one 4 KiB page by re-pointing its entry at the bad page.
 * Returns the number of bytes unmapped (SZ_4K).
 *
 * NOTE(review): on a size mismatch this returns -EINVAL through a size_t
 * return type, which yields a huge positive value — confirm callers treat
 * anything other than the requested size as failure.
 */
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;

	return SZ_4K;
}

/* Size of the buffer needed by etnaviv_iommuv1_dump(): the whole table. */
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
{
	return PT_SIZE;
}

/* Copy the full page table into @buf (for devcoredump-style debugging). */
static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(domain);

	memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
}

/*
 * Program the MC registers of the GPU: the memory window base address for
 * each engine (RA/FE/TX/PEZ/PE) and the page-table DMA address for each
 * MMU unit. Non-static: called from outside this file to (re)establish
 * MMU state on the hardware.
 */
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u32 pgtable;

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* set page table address in MC */
	pgtable = (u32)etnaviv_domain->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}

/* Operation table bound to each v1 domain by etnaviv_iommuv1_domain_alloc(). */
static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_domain_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};

/*
 * Allocate and initialize a v1 MMU domain covering PT_ENTRIES * SZ_4K
 * (2 GiB) of GPU virtual address space starting at GPU_MEM_START.
 *
 * Returns the embedded generic domain on success, NULL on any failure
 * (callers get no errno; both the kzalloc and __etnaviv_iommu_init
 * failure paths collapse to NULL). Ownership of the returned domain
 * passes to the caller; it is released via the .free op.
 */
struct etnaviv_iommu_domain *
etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = GPU_MEM_START;
	domain->size = PT_ENTRIES * SZ_4K;
	domain->ops = &etnaviv_iommuv1_ops;

	ret = __etnaviv_iommu_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	kfree(etnaviv_domain);
	return NULL;
}