// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct vic {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	bool can_use_context;

	/* Platform configuration */
	const struct vic_config *config;
};

static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}

static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}

static int vic_boot(struct vic *vic)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
#endif
	u32 fce_ucode_size, fce_bin_data_offset;
	void *hdr;
	int err = 0;

#ifdef CONFIG_IOMMU_API
	if (vic->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			/*
			 * STREAMID0 is used for input/output buffers.
			 * Initialize it to SID_VIC in case context isolation
			 * is not enabled, and SID_VIC is used for both firmware
			 * and data buffers.
			 *
			 * If context isolation is enabled, it will be
			 * overridden by the SETSTREAMID opcode as part of
			 * each job.
			 */
			vic_writel(vic, value, VIC_THI_STREAMID0);

			/* STREAMID1 is used for firmware loading. */
			vic_writel(vic, value, VIC_THI_STREAMID1);
		}
	}
#endif

	/* setup clockgating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_IDLE_CG_EN |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.virt;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);

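	/*
	 * Newer firmware appears to report an FCE data offset of 0x0 or
	 * 0xa5a5a5a5 and handles FCE setup on its own; vic_load_firmware()
	 * relies on the same check when deciding whether context isolation
	 * can be used.
	 */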
	/* Old VIC firmware needs kernel help with setting up FCE microcode. */
	if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		hdr = vic->falcon.firmware.virt +
			*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
		fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

		falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
				      fce_ucode_size);
		falcon_execute_method(
			&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
	}

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	return 0;
}

static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 500);

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto disable_rpm;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	vic->channel = NULL;

	if (client->group) {
		dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
				 vic->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.virt,
			       vic->falcon.firmware.iova);
	} else {
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.virt,
				  vic->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};

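/*
 * Read and load the VIC firmware exactly once, guarded by a local mutex.
 * The image is placed either in the shared IOMMU domain (when the client is
 * attached to a host1x group) or in DMA-coherent memory, and its FCE data
 * offset is inspected to decide whether context isolation can be used with
 * this firmware.
 */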
static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	static DEFINE_MUTEX(lock);
	u32 fce_bin_data_offset;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	mutex_lock(&lock);

	if (vic->falcon.firmware.virt) {
		err = 0;
		goto unlock;
	}

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		goto unlock;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
		if (!virt) {
			err = -ENOMEM;
			goto unlock;
		}
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
		if (IS_ERR(virt)) {
			err = PTR_ERR(virt);
			goto unlock;
		}
	}

	vic->falcon.firmware.virt = virt;
	vic->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		vic->falcon.firmware.phys = phys;
	}

	/*
	 * Check if firmware is new enough to not require mapping firmware
	 * to data buffer domains.
	 */
	fce_bin_data_offset = *(u32 *)(virt + VIC_UCODE_FCE_DATA_OFFSET);

	if (!vic->config->supports_sid) {
		vic->can_use_context = false;
	} else if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		/*
		 * Firmware will access FCE through STREAMID0, so context
		 * isolation cannot be used.
		 */
		vic->can_use_context = false;
		dev_warn_once(vic->dev, "context isolation disabled due to old firmware\n");
	} else {
		vic->can_use_context = true;
	}

unlock:
	mutex_unlock(&lock);
	return err;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	mutex_unlock(&lock);
	return err;
}

static int vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	usleep_range(10, 20);

	err = vic_load_firmware(vic);
	if (err < 0)
		goto assert;

	err = vic_boot(vic);
	if (err < 0)
		goto assert;

	return 0;

assert:
	reset_control_assert(vic->rst);
disable:
	clk_disable_unprepare(vic->clk);
	return err;
}

static int vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop(vic->channel);

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	return 0;
}

static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void vic_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

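/*
 * Report whether the loaded firmware supports per-context memory isolation.
 * The firmware is loaded here if it hasn't been already, but the hardware is
 * not touched, so no runtime PM reference is needed.
 */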
static int vic_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	struct vic *vic = to_vic(client);
	int err;

	/* This doesn't access HW so it's safe to call without powering up. */
	err = vic_load_firmware(vic);
	if (err < 0)
		return err;

	*supported = vic->can_use_context;

	return 0;
}

static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = vic_can_use_memory_ctx,
};

#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.version = 0x40,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

static const struct of_device_id tegra_vic_of_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);

static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct resource *regs;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	vic->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	err = clk_set_rate(vic->clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

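	/*
	 * Note that the firmware is not read here; it is loaded lazily by
	 * vic_load_firmware() on the first runtime resume (and when userspace
	 * queries context isolation support).
	 */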
	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	return 0;

exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}

static int vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&vic->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	falcon_exit(&vic->falcon);

	return 0;
}

static const struct dev_pm_ops vic_pm_ops = {
	RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = tegra_vic_of_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove = vic_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif