// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct vic {
	struct falcon falcon;
	bool booted;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	/* Platform configuration */
	const struct vic_config *config;
};

static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}

static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}

/* Power the VIC up: enable its clock and release the module reset. */
static int vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	usleep_range(10, 20);

	return 0;

disable:
	clk_disable_unprepare(vic->clk);
	return err;
}

/* Power the VIC down: assert the module reset and gate the clock. */
static int vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	vic->booted = false;

	return 0;
}

/*
 * One-time setup after power-on: program the stream IDs (on SoCs that
 * support them), configure clock gating, boot the Falcon microcontroller
 * and, for firmware that needs it, pass along the location and size of
 * the FCE microcode.
 */
static int vic_boot(struct vic *vic)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
#endif
	u32 fce_ucode_size, fce_bin_data_offset;
	void *hdr;
	int err = 0;

	if (vic->booted)
		return 0;

#ifdef CONFIG_IOMMU_API
	if (vic->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			/*
			 * STREAMID0 is used for input/output buffers.
			 * Initialize it to SID_VIC in case context isolation
			 * is not enabled, and SID_VIC is used for both
			 * firmware and data buffers.
			 *
			 * If context isolation is enabled, it will be
			 * overridden by the SETSTREAMID opcode as part of
			 * each job.
			 */
			vic_writel(vic, value, VIC_THI_STREAMID0);

			/* STREAMID1 is used for firmware loading. */
			vic_writel(vic, value, VIC_THI_STREAMID1);
		}
	}
#endif

	/* setup clockgating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_IDLE_CG_EN |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.virt;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);

	/* Old VIC firmware needs kernel help with setting up FCE microcode. */
	if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		hdr = vic->falcon.firmware.virt +
			*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
		fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

		falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
				      fce_ucode_size);
		falcon_execute_method(
			&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
	}

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	vic->booted = true;

	return 0;
}

static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto free_syncpt;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

free_syncpt:
	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	if (client->group) {
		dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
				 vic->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.virt,
			       vic->falcon.firmware.iova);
	} else {
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.virt,
				  vic->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};

/*
 * Read the firmware image from the filesystem and place it in memory that
 * the Falcon can access, either through the shared IOMMU domain or via a
 * coherent DMA allocation.
 */
static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	if (vic->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		return err;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);

		err = dma_mapping_error(vic->dev, iova);
		if (err < 0)
			return err;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
	}

	vic->falcon.firmware.virt = virt;
	vic->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		vic->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}

static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);
	int err;

	err = pm_runtime_resume_and_get(vic->dev);
	if (err < 0)
		return err;

	err = vic_load_firmware(vic);
	if (err < 0)
		goto rpm_put;

	err = vic_boot(vic);
	if (err < 0)
		goto rpm_put;

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel) {
		err = -ENOMEM;
		goto rpm_put;
	}

	return 0;

rpm_put:
	pm_runtime_put(vic->dev);
	return err;
}

static void vic_close_channel(struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(context->client);

	host1x_channel_put(context->channel);

	pm_runtime_put(vic->dev);
}

static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
};

#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.version = 0x40,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

static const struct of_device_id tegra_vic_of_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);

static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct resource *regs;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	vic->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		err = vic_runtime_resume(&pdev->dev);
		if (err < 0)
			goto unregister_client;
	}

	return 0;

unregister_client:
	host1x_client_unregister(&vic->client.base);
exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}

static int vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&vic->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	if (pm_runtime_enabled(&pdev->dev))
		pm_runtime_disable(&pdev->dev);
	else
		vic_runtime_suspend(&pdev->dev);

	falcon_exit(&vic->falcon);

	return 0;
}

static const struct dev_pm_ops vic_pm_ops = {
	SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
};

struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = tegra_vic_of_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove = vic_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif