/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

/*
 * Power up the GPU: enable the VDD rail and the GPU/PWR clocks, then remove
 * the 3D power-gate clamp while the GPU is held in reset.
 */
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	ret = regulator_enable(tdev->vdd);
	if (ret)
		goto err_power;

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	reset_control_assert(tdev->rst);
	udelay(10);

	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	if (ret)
		goto err_clamp;
	udelay(10);

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	clk_disable_unprepare(tdev->clk);
err_clk:
	regulator_disable(tdev->vdd);
err_power:
	return ret;
}

static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	reset_control_assert(tdev->rst);
	udelay(10);

	clk_disable_unprepare(tdev->clk_pwr);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	return regulator_disable(tdev->vdd);
}

/*
 * If an IOMMU is available on the platform bus, allocate a domain, attach
 * the GPU to it and create an allocator covering the 40-bit IOVA space.
 */
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * than or equal to the system's PAGE_SIZE, with a preference
		 * for the two being equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
				   (1ULL << 40) >> tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}

static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}

static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_mc *mc = tdev->device.mc;
	bool handled = false;

	if (likely(mc)) {
		nvkm_mc_intr_unarm(mc);
		nvkm_mc_intr(mc, &handled);
		nvkm_mc_intr_rearm(mc);
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}

static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}

static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}

static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};

int
nvkm_device_tegra_new(struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->pdev = pdev;

	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(tdev->vdd)) {
		ret = PTR_ERR(tdev->vdd);
		goto free;
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	/* only hand the device back once it is fully constructed */
	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
int
nvkm_device_tegra_new(struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif