// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"

void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}

static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC1 */
		.base = 0x1bc0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
	.reserve_vblank_syncpts = false,
};

/*
 * Tegra234 has two stream ID protection tables, one for setting stream IDs
 * through the channel path via SETSTREAMID, and one for setting them via
 * MMIO. We program each engine's data stream ID in the channel path table
 * and firmware stream ID in the MMIO path table.
 */
static const struct host1x_sid_entry tegra234_sid_table[] = {
	{
		/* VIC channel */
		.base = 0x17b8,
		.offset = 0x30,
		.limit = 0x30
	},
	{
		/* VIC MMIO */
		.base = 0x1688,
		.offset = 0x34,
		.limit = 0x34
	},
	{
		/* NVDEC channel */
		.base = 0x17c8,
		.offset = 0x30,
		.limit = 0x30,
	},
	{
		/* NVDEC MMIO */
		.base = 0x1698,
		.offset = 0x34,
		.limit = 0x34,
	},
};

static const struct host1x_info host1x08_info = {
	.nb_channels = 63,
	.nb_pts = 1024,
	.nb_mlocks = 24,
	.nb_bases = 0,
	.init = host1x08_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.has_common = true,
	.num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
	.sid_table = tegra234_sid_table,
	.streamid_vm_table = { 0x1004, 128 },
	.classid_vm_table = { 0x1404, 25 },
	.mmio_vm_table = { 0x1504, 25 },
	.reserve_vblank_syncpts = false,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

static void host1x_setup_virtualization_tables(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	if (!info->has_hypervisor)
		return;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}

	for (i = 0; i < info->streamid_vm_table.count; i++) {
		/* Allow access to all stream IDs to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->classid_vm_table.count; i++) {
		/* Allow access to all classes to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->mmio_vm_table.count; i++) {
		/* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
		host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
	}
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/* Our IOMMU usage policy doesn't currently play well with GART */
	if (of_machine_is_compatible("nvidia,tegra20"))
		return false;

	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}

static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
			to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}

static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}

static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}

static int host1x_get_resets(struct host1x *host)
{
	int err;

	host->resets[0].id = "mc";
	host->resets[1].id = "host1x";
	host->nresets = ARRAY_SIZE(host->resets);

	err = devm_reset_control_bulk_get_optional_exclusive_released(
		host->dev, host->nresets, host->resets);
	if (err) {
		dev_err(host->dev, "failed to get reset: %d\n", err);
		return err;
	}

	return 0;
}

static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);

		host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);

		if (host->info->has_common) {
			host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
			if (IS_ERR(host->common_regs))
				return PTR_ERR(host->common_regs);
		}
	} else {
		host->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);
	}

	host->syncpt_irq = platform_get_irq(pdev, 0);
	if (host->syncpt_irq < 0)
		return host->syncpt_irq;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	err = host1x_get_resets(host);
	if (err)
		return err;

	host1x_bo_cache_init(&host->cache);

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		goto destroy_cache;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = host1x_memory_context_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize context list\n");
		goto free_channels;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto free_contexts;
	}

	err = host1x_intr_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	pm_runtime_enable(&pdev->dev);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		goto pm_disable;

	/* The driver's code isn't ready yet for dynamic RPM. */
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto pm_disable;

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);

	pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);

	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
free_contexts:
	host1x_memory_context_list_free(&host->context_list);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);
destroy_cache:
	host1x_bo_cache_destroy(&host->cache);

	return err;
}

static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);

	pm_runtime_force_suspend(&pdev->dev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	host1x_memory_context_list_free(&host->context_list);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);
	host1x_bo_cache_destroy(&host->cache);

	return 0;
}

static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	host1x_intr_stop(host);
	host1x_syncpt_save(host);

	err = reset_control_bulk_assert(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to assert reset: %d\n", err);
		goto resume_host1x;
	}

	usleep_range(1000, 2000);

	clk_disable_unprepare(host->clk);
	reset_control_bulk_release(host->nresets, host->resets);

	return 0;

resume_host1x:
	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return err;
}

static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(host->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(host->nresets, host->resets);
	if (err < 0) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return 0;

disable_clk:
	clk_disable_unprepare(host->clk);
release_reset:
	reset_control_bulk_release(host->nresets, host->resets);

	return err;
}

static const struct dev_pm_ops host1x_pm_ops = {
	SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
			   NULL)
	/* TODO: add system suspend-resume once the driver is ready for that */
};

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
		.pm = &host1x_pm_ops,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");