// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct vic {
	struct falcon falcon;
	bool booted;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	/* Platform configuration */
	const struct vic_config *config;
};

static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}

static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}

static int vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	usleep_range(10, 20);

	return 0;

disable:
	clk_disable_unprepare(vic->clk);
	return err;
}

static int vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	vic->booted = false;

	return 0;
}

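/*
 * Program the stream ID registers (on SoCs that support them), set up clock
 * gating and boot the Falcon. Old firmware additionally needs to be told
 * where to find the FCE microcode embedded in the image.
 */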
static int vic_boot(struct vic *vic)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
#endif
	u32 fce_ucode_size, fce_bin_data_offset;
	void *hdr;
	int err = 0;

	if (vic->booted)
		return 0;

#ifdef CONFIG_IOMMU_API
	if (vic->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			/*
			 * STREAMID0 is used for input/output buffers.
			 * Initialize it to SID_VIC in case context isolation
			 * is not enabled, and SID_VIC is used for both
			 * firmware and data buffers.
			 *
			 * If context isolation is enabled, it will be
			 * overridden by the SETSTREAMID opcode as part of
			 * each job.
			 */
			vic_writel(vic, value, VIC_THI_STREAMID0);

			/* STREAMID1 is used for firmware loading. */
			vic_writel(vic, value, VIC_THI_STREAMID1);
		}
	}
#endif

	/* setup clockgating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_IDLE_CG_EN |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.virt;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);

	falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);

	/* Old VIC firmware needs kernel help with setting up FCE microcode. */
	if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		hdr = vic->falcon.firmware.virt +
			*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
		fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

		falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
				      fce_ucode_size);
		falcon_execute_method(
			&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
	}

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	vic->booted = true;

	return 0;
}

static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto free_syncpt;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

free_syncpt:
	host1x_syncpt_free(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	host1x_syncpt_free(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	if (client->group) {
		dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
				 vic->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.virt,
			       vic->falcon.firmware.iova);
	} else {
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.virt,
				  vic->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};

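/*
 * Read the VIC firmware image and place it in a buffer that the Falcon can
 * reach: a coherent DMA allocation when there is no shared IOMMU domain, or
 * memory from the shared domain otherwise.
 */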
static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	if (vic->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		return err;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
		if (!virt)
			return -ENOMEM;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
		if (IS_ERR(virt))
			return PTR_ERR(virt);
	}

	vic->falcon.firmware.virt = virt;
	vic->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		vic->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}

static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);
	int err;

	err = pm_runtime_resume_and_get(vic->dev);
	if (err < 0)
		return err;

	err = vic_load_firmware(vic);
	if (err < 0)
		goto rpm_put;

	err = vic_boot(vic);
	if (err < 0)
		goto rpm_put;

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel) {
		err = -ENOMEM;
		goto rpm_put;
	}

	return 0;

rpm_put:
	pm_runtime_put(vic->dev);
	return err;
}

static void vic_close_channel(struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(context->client);

	host1x_channel_put(context->channel);

	pm_runtime_put(vic->dev);
}

static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
};

#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.version = 0x40,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

static const struct of_device_id tegra_vic_of_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);

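/*
 * Instantiate the VIC: map its registers, acquire the clock (and the reset,
 * when not handled by a power domain), initialize the Falcon helper and
 * register the device as a host1x client.
 */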
static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct resource *regs;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	vic->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		err = vic_runtime_resume(&pdev->dev);
		if (err < 0)
			goto unregister_client;
	}

	return 0;

unregister_client:
	host1x_client_unregister(&vic->client.base);
exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}

static int vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&vic->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	if (pm_runtime_enabled(&pdev->dev))
		pm_runtime_disable(&pdev->dev);
	else
		vic_runtime_suspend(&pdev->dev);

	falcon_exit(&vic->falcon);

	return 0;
}

static const struct dev_pm_ops vic_pm_ops = {
	SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
};

struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = tegra_vic_of_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove = vic_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif