// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct vic {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	bool can_use_context;

	/* Platform configuration */
	const struct vic_config *config;
};

static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}

static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}

static int vic_boot(struct vic *vic)
{
	u32 fce_ucode_size, fce_bin_data_offset, stream_id;
	void *hdr;
	int err = 0;

	if (vic->config->supports_sid && tegra_dev_iommu_get_stream_id(vic->dev, &stream_id)) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		/*
		 * STREAMID0 is used for input/output buffers. Initialize it to SID_VIC in case
		 * context isolation is not enabled, and SID_VIC is used for both firmware and
		 * data buffers.
		 *
		 * If context isolation is enabled, it will be overridden by the SETSTREAMID
		 * opcode as part of each job.
		 */
		vic_writel(vic, stream_id, VIC_THI_STREAMID0);

		/* STREAMID1 is used for firmware loading. */
		vic_writel(vic, stream_id, VIC_THI_STREAMID1);
	}

	/* setup clockgating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_IDLE_CG_EN |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.virt;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);

	/* Old VIC firmware needs kernel help with setting up FCE microcode. */
	if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		hdr = vic->falcon.firmware.virt +
			*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
		fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

		falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
				      fce_ucode_size);
		falcon_execute_method(
			&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
	}

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	return 0;
}

static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 500);

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto disable_rpm;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	vic->channel = NULL;

	if (client->group) {
		dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
				 vic->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.virt,
			       vic->falcon.firmware.iova);
	} else {
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.virt,
				  vic->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};

static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	static DEFINE_MUTEX(lock);
	u32 fce_bin_data_offset;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	mutex_lock(&lock);

	if (vic->falcon.firmware.virt) {
		err = 0;
		goto unlock;
	}

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		goto unlock;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
		if (!virt) {
			err = -ENOMEM;
			goto unlock;
		}
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
		if (IS_ERR(virt)) {
			err = PTR_ERR(virt);
			goto unlock;
		}
	}

	vic->falcon.firmware.virt = virt;
	vic->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		vic->falcon.firmware.phys = phys;
	}

	/*
	 * Check if firmware is new enough to not require mapping firmware
	 * to data buffer domains.
	 */
	fce_bin_data_offset = *(u32 *)(virt + VIC_UCODE_FCE_DATA_OFFSET);

	if (!vic->config->supports_sid) {
		vic->can_use_context = false;
	} else if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		/*
		 * Firmware will access FCE through STREAMID0, so context
		 * isolation cannot be used.
		 */
		vic->can_use_context = false;
		dev_warn_once(vic->dev, "context isolation disabled due to old firmware\n");
	} else {
		vic->can_use_context = true;
	}

unlock:
	mutex_unlock(&lock);
	return err;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	mutex_unlock(&lock);
	return err;
}


static int __maybe_unused vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	usleep_range(10, 20);

	err = vic_load_firmware(vic);
	if (err < 0)
		goto assert;

	err = vic_boot(vic);
	if (err < 0)
		goto assert;

	return 0;

assert:
	reset_control_assert(vic->rst);
disable:
	clk_disable_unprepare(vic->clk);
	return err;
}

static int __maybe_unused vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop(vic->channel);

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	return 0;
}

static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void vic_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

static int vic_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	struct vic *vic = to_vic(client);
	int err;

	/* This doesn't access HW so it's safe to call without powering up. */
	err = vic_load_firmware(vic);
	if (err < 0)
		return err;

	*supported = vic->can_use_context;

	return 0;
}

static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = vic_can_use_memory_ctx,
};

#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.version = 0x40,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_234_VIC_FIRMWARE "nvidia/tegra234/vic.bin"

static const struct vic_config vic_t234_config = {
	.firmware = NVIDIA_TEGRA_234_VIC_FIRMWARE,
	.version = 0x23,
	.supports_sid = true,
};

static const struct of_device_id tegra_vic_of_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ .compatible = "nvidia,tegra234-vic", .data = &vic_t234_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);

static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	vic->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	err = clk_set_rate(vic->clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	return 0;

exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}

static void vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);

	host1x_client_unregister(&vic->client.base);

	falcon_exit(&vic->falcon);
}

static const struct dev_pm_ops vic_pm_ops = {
	RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = tegra_vic_of_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove_new = vic_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_234_VIC_FIRMWARE);
#endif