/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	ret = regulator_enable(tdev->vdd);
	if (ret)
		goto err_power;

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	reset_control_assert(tdev->rst);
	udelay(10);

	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	if (ret)
		goto err_clamp;
	udelay(10);

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	regulator_disable(tdev->vdd);
err_power:
	return ret;
}

static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	reset_control_assert(tdev->rst);
	udelay(10);

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	return regulator_disable(tdev->vdd);
}

static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
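			/*
			 * Fall back to the largest supported page size below
			 * PAGE_SIZE: fls() returns the 1-based index of the
			 * highest set bit, so subtracting one below yields
			 * the corresponding page shift.
			 */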
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}

static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}

static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_mc *mc = tdev->device.mc;
	bool handled = false;
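	/*
	 * Disarm MC interrupt delivery while the handlers run, then rearm it
	 * once dispatch is complete.
	 */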
	if (likely(mc)) {
		nvkm_mc_intr_unarm(mc);
		nvkm_mc_intr(mc, &handled);
		nvkm_mc_intr_rearm(mc);
	}
	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}

static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}

static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}

static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};

int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(tdev->vdd)) {
		ret = PTR_ERR(tdev->vdd);
		goto free;
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}
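	/*
	 * The reference clock is optional; tdev->clk_ref was zero-initialised
	 * by kzalloc(), so the IS_ERR() check below stays false when the
	 * clock is not requested.
	 */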
	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 * This will be refined in nouveau_ttm_init, but we need to do it
	 * early for instmem to behave properly.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif