// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct vic {
	struct falcon falcon;
	bool booted;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	/* Platform configuration */
	const struct vic_config *config;
};

static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}

static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}

static int vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	usleep_range(10, 20);

	return 0;

disable:
	clk_disable_unprepare(vic->clk);
	return err;
}

static int vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	vic->booted = false;

	return 0;
}

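/*
 * Boot the Falcon microcontroller inside the VIC: program the TFBIF/THI
 * stream ID registers when the SoC supports stream IDs and an IOMMU fwspec
 * is attached, set up clock gating, boot the Falcon and hand it the FCE
 * microcode size and offset before waiting for it to go idle.
 */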
static int vic_boot(struct vic *vic)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
#endif
	u32 fce_ucode_size, fce_bin_data_offset;
	void *hdr;
	int err = 0;

	if (vic->booted)
		return 0;

#ifdef CONFIG_IOMMU_API
	if (vic->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			vic_writel(vic, value, VIC_THI_STREAMID0);
			vic_writel(vic, value, VIC_THI_STREAMID1);
		}
	}
#endif

	/* setup clockgating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_IDLE_CG_EN |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.vaddr;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
	hdr = vic->falcon.firmware.vaddr +
		*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
	fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

	falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
	falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
			      fce_ucode_size);
	falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			      (vic->falcon.firmware.paddr + fce_bin_data_offset)
				>> 8);

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	vic->booted = true;

	return 0;
}

static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->parent);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto free_syncpt;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent device.
	 */
	client->dev->dma_parms = client->parent->dma_parms;

	return 0;

free_syncpt:
	host1x_syncpt_free(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->parent);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	host1x_syncpt_free(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	if (client->group)
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.vaddr,
			       vic->falcon.firmware.paddr);
	else
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.vaddr,
				  vic->falcon.firmware.paddr);

	return 0;
}

static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};

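/*
 * Read the configured firmware image and place it in memory that the VIC
 * can access: coherent DMA memory when the client has no shared IOMMU
 * group, otherwise an allocation from the shared tegra_drm domain.
 */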
static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	dma_addr_t phys;
	size_t size;
	void *virt;
	int err;

	if (vic->falcon.firmware.vaddr)
		return 0;

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		return err;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &phys, GFP_KERNEL);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			return err;
	} else {
		virt = tegra_drm_alloc(tegra, size, &phys);
	}

	vic->falcon.firmware.vaddr = virt;
	vic->falcon.firmware.paddr = phys;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		/*
		 * If the DMA API mapped this through a bounce buffer, the
		 * dma_sync_single_for_device() call below will not be able
		 * to flush the caches for the right memory pages. Output a
		 * big warning in that case so that the DMA mask can be set
		 * properly and the bounce buffer avoided.
		 */
		WARN(phys != vic->falcon.firmware.paddr,
		     "check DMA mask setting for %s\n", dev_name(vic->dev));
	}

	dma_sync_single_for_device(vic->dev, phys, size, DMA_TO_DEVICE);

	if (client->group)
		dma_unmap_single(vic->dev, phys, size, DMA_TO_DEVICE);

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, phys);
	else
		tegra_drm_free(tegra, size, virt, phys);

	return err;
}

static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);
	int err;

	err = pm_runtime_get_sync(vic->dev);
	if (err < 0)
		return err;

	err = vic_load_firmware(vic);
	if (err < 0)
		goto rpm_put;

	err = vic_boot(vic);
	if (err < 0)
		goto rpm_put;

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel) {
		err = -ENOMEM;
		goto rpm_put;
	}

	return 0;

rpm_put:
	pm_runtime_put(vic->dev);
	return err;
}

static void vic_close_channel(struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(context->client);

	host1x_channel_put(context->channel);

	pm_runtime_put(vic->dev);
}

static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
};

#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.version = 0x40,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

static const struct of_device_id vic_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ },
};

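/*
 * Map the registers, look up the module clock and, when not backed by a
 * power domain, the reset control, initialize the Falcon helper and
 * register the VIC as a host1x client.
 */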
static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct resource *regs;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	vic->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		err = vic_runtime_resume(&pdev->dev);
		if (err < 0)
			goto unregister_client;
	}

	return 0;

unregister_client:
	host1x_client_unregister(&vic->client.base);
exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}

static int vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&vic->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	if (pm_runtime_enabled(&pdev->dev))
		pm_runtime_disable(&pdev->dev);
	else
		vic_runtime_suspend(&pdev->dev);

	falcon_exit(&vic->falcon);

	return 0;
}

static const struct dev_pm_ops vic_pm_ops = {
	SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
};

struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = vic_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove = vic_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif