1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
17 #include <linux/dma-mapping.h>
23 #define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))
61 (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K)))
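/*
 * Illustrative user-space sketch (not driver code): what PVR_MASK_FROM_SIZE()
 * evaluates to for a 4 KiB device page. With size_ == SZ_4K (0x1000),
 * (size_ - 1) == 0xfff and the macro yields ~0xfff, i.e. a mask that keeps
 * the device-page-aligned part of an address and drops the page offset.
 * EXAMPLE_MASK_FROM_SIZE is a stand-in name invented for this example.
 */
#include <stdint.h>
#include <assert.h>

#define EXAMPLE_MASK_FROM_SIZE(size_) (~((uint64_t)(size_) - 1u))

int main(void)
{
	const uint64_t addr = 0x12345678abcdULL;

	assert(EXAMPLE_MASK_FROM_SIZE(0x1000) == 0xfffffffffffff000ULL);
	assert((addr & EXAMPLE_MASK_FROM_SIZE(0x1000)) == 0x12345678a000ULL);
	return 0;
}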
64 PVR_MMU_SYNC_LEVEL_NONE = -1,
77 * pvr_mmu_set_flush_flags() - Set MMU cache flush flags for next call to
79 * @pvr_dev: Target PowerVR device.
87 atomic_fetch_or(flags, &pvr_dev->mmu_flush_cache_flags); in pvr_mmu_set_flush_flags()
91 * pvr_mmu_flush_request_all() - Request flush of all MMU caches when
93 * @pvr_dev: Target PowerVR device.
104 * pvr_mmu_flush_exec() - Execute a flush of all MMU caches previously
106 * @pvr_dev: Target PowerVR device.
109 * This function must be called prior to submitting any new GPU job. The flush
115 * flush fails (implying the firmware is not responding) then the GPU device is
120 * * -%EIO if the device is unavailable, or
133 return -EIO; in pvr_mmu_flush_exec()
136 if (!pvr_dev->fw_dev.booted) in pvr_mmu_flush_exec()
139 cmd_mmu_cache_data->cache_flags = in pvr_mmu_flush_exec()
140 atomic_xchg(&pvr_dev->mmu_flush_cache_flags, 0); in pvr_mmu_flush_exec()
142 if (!cmd_mmu_cache_data->cache_flags) in pvr_mmu_flush_exec()
147 pvr_fw_object_get_fw_addr(pvr_dev->fw_dev.mem.mmucache_sync_obj, in pvr_mmu_flush_exec()
148 &cmd_mmu_cache_data->mmu_cache_sync_fw_addr); in pvr_mmu_flush_exec()
149 cmd_mmu_cache_data->mmu_cache_sync_update_value = 0; in pvr_mmu_flush_exec()
166 * reset the GPU and retry. in pvr_mmu_flush_exec()
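/*
 * Illustrative user-space analogue (not driver code) of the flush-flag
 * hand-off above: producers OR flags into a shared word with an atomic
 * fetch-or, and the flush path consumes everything accumulated so far with
 * an atomic exchange against zero, so no request is lost between the two
 * steps. All names below are invented for the example.
 */
#include <stdatomic.h>
#include <stdio.h>

#define EXAMPLE_FLUSH_PT  0x1u
#define EXAMPLE_FLUSH_PD  0x2u
#define EXAMPLE_FLUSH_PC  0x4u

static atomic_uint example_flush_flags;

static void example_set_flush_flags(unsigned int flags)
{
	atomic_fetch_or(&example_flush_flags, flags);
}

static unsigned int example_flush_exec(void)
{
	/* Take ownership of every flush requested so far. */
	return atomic_exchange(&example_flush_flags, 0);
}

int main(void)
{
	example_set_flush_flags(EXAMPLE_FLUSH_PT);
	example_set_flush_flags(EXAMPLE_FLUSH_PD);
	printf("flushing 0x%x\n", example_flush_exec()); /* 0x3 */
	printf("flushing 0x%x\n", example_flush_exec()); /* 0x0: nothing pending */
	return 0;
}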
192 * DOC: PowerVR Virtual Memory Handling
195 * DOC: PowerVR Virtual Memory Handling (constants)
199 * Default value for a u16-based index.
203 #define PVR_IDX_INVALID ((u16)(-1))
213 * Page size of a PowerVR device's integrated MMU. The CPU page size must be
215 * checked at compile-time.
221 * struct pvr_mmu_backing_page - Represents a single page used to back a page
225 * @pvr_dev: The PowerVR device to which this page is associated. **For
 237 * pvr_mmu_backing_page_init() - Initialize an MMU backing page.
239 * @pvr_dev: Target PowerVR device.
245 * 3. Map the page to DMA-space.
252 * * -%ENOMEM if allocation of the backing page or mapping of the backing
259 struct device *dev = from_pvr_device(pvr_dev)->dev; in pvr_mmu_backing_page_init()
269 return -ENOMEM; in pvr_mmu_backing_page_init()
273 err = -ENOMEM; in pvr_mmu_backing_page_init()
280 err = -ENOMEM; in pvr_mmu_backing_page_init()
284 page->dma_addr = dma_addr; in pvr_mmu_backing_page_init()
285 page->host_ptr = host_ptr; in pvr_mmu_backing_page_init()
286 page->pvr_dev = pvr_dev; in pvr_mmu_backing_page_init()
287 page->raw_page = raw_page; in pvr_mmu_backing_page_init()
288 kmemleak_alloc(page->host_ptr, PAGE_SIZE, 1, GFP_KERNEL); in pvr_mmu_backing_page_init()
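/*
 * Condensed sketch (illustration only, not the driver's exact code) of the
 * allocate -> vmap -> DMA-map sequence performed by
 * pvr_mmu_backing_page_init(), using only core kernel APIs; error unwinding
 * runs in the reverse order used by pvr_mmu_backing_page_fini(). The
 * "example_" struct and function names are invented for this sketch.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct example_backing_page {
	struct page *raw_page;
	void *host_ptr;
	dma_addr_t dma_addr;
};

static int example_backing_page_init(struct example_backing_page *bp,
				     struct device *dev)
{
	bp->raw_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!bp->raw_page)
		return -ENOMEM;

	/* Permanent CPU mapping so table entries can be written later. */
	bp->host_ptr = vmap(&bp->raw_page, 1, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	if (!bp->host_ptr)
		goto err_free_page;

	/* Hand the page to the device; the GPU only ever reads page tables. */
	bp->dma_addr = dma_map_page(dev, bp->raw_page, 0, PAGE_SIZE,
				    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, bp->dma_addr))
		goto err_vunmap;

	return 0;

err_vunmap:
	vunmap(bp->host_ptr);
err_free_page:
	__free_page(bp->raw_page);
	return -ENOMEM;
}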
 302 * pvr_mmu_backing_page_fini() - Tear down an MMU backing page.
308 * 1. Unmap the page from DMA-space,
314 * It is a no-op to call this function a second (or further) time on any @page.
322 if (!page->pvr_dev) in pvr_mmu_backing_page_fini()
325 dev = from_pvr_device(page->pvr_dev)->dev; in pvr_mmu_backing_page_fini()
327 dma_unmap_page(dev, page->dma_addr, PVR_MMU_BACKING_PAGE_SIZE, in pvr_mmu_backing_page_fini()
330 kmemleak_free(page->host_ptr); in pvr_mmu_backing_page_fini()
331 vunmap(page->host_ptr); in pvr_mmu_backing_page_fini()
333 __free_page(page->raw_page); in pvr_mmu_backing_page_fini()
 339 * pvr_mmu_backing_page_sync() - Flush an MMU backing page from the CPU to the
353 struct pvr_device *pvr_dev = page->pvr_dev; in pvr_mmu_backing_page_sync()
363 dev = from_pvr_device(pvr_dev)->dev; in pvr_mmu_backing_page_sync()
365 dma_sync_single_for_device(dev, page->dma_addr, in pvr_mmu_backing_page_sync()
389 * struct pvr_page_table_l2_entry_raw - A single entry in a level 2 page table.
392 * This type is a structure for type-checking purposes. At compile-time, its
397 * .. flat-table::
399 * :stub-columns: 1
401 * * - 31..4
402 * - **Level 1 Page Table Base Address:** Bits 39..12 of the L1
405 * * - 3..2
406 * - *(reserved)*
408 * * - 1
409 * - **Pending:** When valid bit is not set, indicates that a valid
414 * * - 0
415 * - **Valid:** Indicates that the entry contains a valid L1 page
432 * pvr_page_table_l2_entry_raw_set() - Write a valid entry into a raw level 2
447 WRITE_ONCE(entry->val, in pvr_page_table_l2_entry_raw_set()
456 WRITE_ONCE(entry->val, 0); in pvr_page_table_l2_entry_raw_clear()
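/*
 * Worked example (illustration only) of the 32-bit level 2 entry layout
 * documented above: the level 1 table's DMA address must be 4 KiB aligned,
 * its bits 39..12 are stored in entry bits 31..4, and bit 0 marks the entry
 * valid. The helper below is hypothetical; the driver packs the same fields
 * with its PVR_PAGE_TABLE_FIELD_* macros.
 */
#include <stdint.h>
#include <assert.h>

#define EX_L2_VALID          (1u << 0)
#define EX_L2_ADDR_SHIFT     12 /* address bits 39..12 ... */
#define EX_L2_ADDR_FIELD_POS 4  /* ... land in entry bits 31..4 */

static uint32_t ex_l2_entry_pack(uint64_t l1_dma_addr)
{
	assert((l1_dma_addr & 0xfff) == 0); /* must be 4 KiB aligned */
	assert(l1_dma_addr < (1ull << 40)); /* 40-bit physical space */

	return (uint32_t)((l1_dma_addr >> EX_L2_ADDR_SHIFT) << EX_L2_ADDR_FIELD_POS) |
	       EX_L2_VALID;
}

int main(void)
{
	/* 0x2345676000 >> 12 == 0x2345676; << 4, | 1 == 0x23456761. */
	assert(ex_l2_entry_pack(0x2345676000ull) == 0x23456761u);
	return 0;
}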
460 * struct pvr_page_table_l1_entry_raw - A single entry in a level 1 page table.
463 * This type is a structure for type-checking purposes. At compile-time, its
468 * .. flat-table::
470 * :stub-columns: 1
472 * * - 63..41
473 * - *(reserved)*
475 * * - 40
476 * - **Pending:** When valid bit is not set, indicates that a valid entry
480 * * - 39..5
481 * - **Level 0 Page Table Base Address:** The way this value is
491 * .. flat-table::
493 * :header-rows: 1
494 * :stub-columns: 1
496 * * - Page size
497 * - L0 page table base address bits
498 * - Number of L0 page table entries
499 * - Size of L0 page table
501 * * - 4KiB
502 * - 39..12
503 * - 512
504 * - 4KiB
506 * * - 16KiB
507 * - 39..10
508 * - 128
509 * - 1KiB
511 * * - 64KiB
512 * - 39..8
513 * - 32
514 * - 256B
516 * * - 256KiB
517 * - 39..6
518 * - 8
519 * - 64B
521 * * - 1MiB
522 * - 39..5 (4 = '0')
523 * - 2
524 * - 16B
526 * * - 2MiB
527 * - 39..5 (4..3 = '00')
528 * - 1
529 * - 8B
531 * * - 4
532 * - *(reserved)*
534 * * - 3..1
535 * - **Page Size:** Sets the page size, from 4KiB to 2MiB.
537 * * - 0
538 * - **Valid:** Indicates that the entry contains a valid L0 page table.
555 * pvr_page_table_l1_entry_raw_set() - Write a valid entry into a raw level 1
568 WRITE_ONCE(entry->val, in pvr_page_table_l1_entry_raw_set()
573 * The use of a 4K-specific macro here is correct. It is in pvr_page_table_l1_entry_raw_set()
574 * a future optimization to allocate sub-host-page-sized in pvr_page_table_l1_entry_raw_set()
585 WRITE_ONCE(entry->val, 0); in pvr_page_table_l1_entry_raw_clear()
589 * struct pvr_page_table_l0_entry_raw - A single entry in a level 0 page table.
592 * This type is a structure for type-checking purposes. At compile-time, its
597 * .. flat-table::
599 * :stub-columns: 1
601 * * - 63
602 * - *(reserved)*
604 * * - 62
605 * - **PM/FW Protect:** Indicates a protected region which only the
608 * * - 61..40
609 * - **VP Page (High):** Virtual-physical page used for Parameter Manager
617 * * - 39..12
618 * - **Physical Page Address:** The way this value is interpreted depends
626 * .. flat-table::
628 * :header-rows: 1
629 * :stub-columns: 1
631 * * - Page size
632 * - Physical address bits
634 * * - 4KiB
635 * - 39..12
637 * * - 16KiB
638 * - 39..14
640 * * - 64KiB
641 * - 39..16
643 * * - 256KiB
644 * - 39..18
646 * * - 1MiB
647 * - 39..20
649 * * - 2MiB
650 * - 39..21
652 * * - 11..6
653 * - **VP Page (Low):** Continuation of VP Page (High).
655 * * - 5
656 * - **Pending:** When valid bit is not set, indicates that a valid entry
660 * * - 4
661 * - **PM Src:** Set on Parameter Manager (PM) allocated page table
665 * * - 3
 666  *     - **SLC Bypass Control:** Specifies that requests to this page should bypass
669 * * - 2
670 * - **Cache Coherency:** Indicates that the page is coherent (i.e. it
674 * * - 1
675 * - **Read Only:** If set, this bit indicates that the page is read only.
676 * An attempted write to this page would result in a write-protection
679 * * - 0
680 * - **Valid:** Indicates that the entry contains a valid page. If the
691 * struct pvr_page_flags_raw - The configurable flags from a single entry in a
715 * pvr_page_table_l0_entry_raw_set() - Write a valid entry into a raw level 0
733 WRITE_ONCE(entry->val, PVR_PAGE_TABLE_FIELD_PREP(0, PT, VALID, true) | in pvr_page_table_l0_entry_raw_set()
742 WRITE_ONCE(entry->val, 0); in pvr_page_table_l0_entry_raw_clear()
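/*
 * Worked example (illustration only) of a 64-bit level 0 entry for a 4 KiB
 * device page, following the bit layout documented above: the page's
 * physical address bits 39..12 sit in the same bit positions of the entry,
 * and the low bits carry the flags (valid, read only, cache coherency, ...).
 * The helper is hypothetical; in the driver the flag portion comes from
 * pvr_page_flags_raw_create() and the address is merged in
 * pvr_page_table_l0_entry_raw_set().
 */
#include <stdint.h>
#include <assert.h>

#define EX_L0_VALID     (1ull << 0)
#define EX_L0_READ_ONLY (1ull << 1)
#define EX_L0_CC        (1ull << 2)              /* cache coherency */
#define EX_L0_ADDR_MASK 0x000000fffffff000ull    /* bits 39..12 */

static uint64_t ex_l0_entry_pack(uint64_t phys_addr, uint64_t flags)
{
	/* 4 KiB aligned and within the 40-bit physical address space. */
	assert((phys_addr & ~EX_L0_ADDR_MASK) == 0);

	return (phys_addr & EX_L0_ADDR_MASK) | flags | EX_L0_VALID;
}

int main(void)
{
	assert(ex_l0_entry_pack(0x1234567000ull, EX_L0_READ_ONLY) ==
	       0x1234567003ull);
	return 0;
}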
746 * pvr_page_flags_raw_create() - Initialize the flag bits of a raw level 0 page
748 * @read_only: This page is read-only (see: Read Only).
778 * struct pvr_page_table_l2_raw - The raw data of a level 2 page table.
780 * This type is a structure for type-checking purposes. At compile-time, its
791 * struct pvr_page_table_l1_raw - The raw data of a level 1 page table.
793 * This type is a structure for type-checking purposes. At compile-time, its
804 * struct pvr_page_table_l0_raw - The raw data of a level 0 page table.
806 * This type is a structure for type-checking purposes. At compile-time, its
830 * We pre-declare these types because they cross-depend on pointers to each
837 * struct pvr_page_table_l2 - A wrapped level 2 page table.
863 * in this table. This value is essentially a refcount - the table is
871 * pvr_page_table_l2_init() - Initialize a level 2 page table.
873 * @pvr_dev: Target PowerVR device
 880  *  * Any error encountered while initializing &table->backing_page using
887 return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev); in pvr_page_table_l2_init()
 891  * pvr_page_table_l2_fini() - Tear down a level 2 page table.
899 pvr_mmu_backing_page_fini(&table->backing_page); in pvr_page_table_l2_fini()
903 * pvr_page_table_l2_sync() - Flush a level 2 page table from the CPU to the
918 pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_2_FLAGS); in pvr_page_table_l2_sync()
922 * pvr_page_table_l2_get_raw() - Access the raw equivalent of a mirror level 2
937 return table->backing_page.host_ptr; in pvr_page_table_l2_get_raw()
941 * pvr_page_table_l2_get_entry_raw() - Access an entry from the raw equivalent
961 return &pvr_page_table_l2_get_raw(table)->entries[idx]; in pvr_page_table_l2_get_entry_raw()
965 * pvr_page_table_l2_entry_is_valid() - Check if a level 2 page table entry is
984 * struct pvr_page_table_l1 - A wrapped level 1 page table.
1035 * in this table. This value is essentially a refcount - the table is
1043 * pvr_page_table_l1_init() - Initialize a level 1 page table.
1045 * @pvr_dev: Target PowerVR device
1056  *  * Any error encountered while initializing &table->backing_page using
1063 table->parent_idx = PVR_IDX_INVALID; in pvr_page_table_l1_init()
1065 return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev); in pvr_page_table_l1_init()
1069  * pvr_page_table_l1_free() - Tear down a level 1 page table.
1079 pvr_mmu_backing_page_fini(&table->backing_page); in pvr_page_table_l1_free()
1084 * pvr_page_table_l1_sync() - Flush a level 1 page table from the CPU to the
1099 pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_1_FLAGS); in pvr_page_table_l1_sync()
1103 * pvr_page_table_l1_get_raw() - Access the raw equivalent of a mirror level 1
1118 return table->backing_page.host_ptr; in pvr_page_table_l1_get_raw()
1122 * pvr_page_table_l1_get_entry_raw() - Access an entry from the raw equivalent
1142 return &pvr_page_table_l1_get_raw(table)->entries[idx]; in pvr_page_table_l1_get_entry_raw()
1146 * pvr_page_table_l1_entry_is_valid() - Check if a level 1 page table entry is
1165 * struct pvr_page_table_l0 - A wrapped level 0 page table.
1211 * in this table. This value is essentially a refcount - the table is
1219 * pvr_page_table_l0_init() - Initialize a level 0 page table.
1221 * @pvr_dev: Target PowerVR device
1232  *  * Any error encountered while initializing &table->backing_page using
1239 table->parent_idx = PVR_IDX_INVALID; in pvr_page_table_l0_init()
1241 return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev); in pvr_page_table_l0_init()
1245  * pvr_page_table_l0_free() - Tear down a level 0 page table.
1255 pvr_mmu_backing_page_fini(&table->backing_page); in pvr_page_table_l0_free()
1260 * pvr_page_table_l0_sync() - Flush a level 0 page table from the CPU to the
1276 pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_0_FLAGS); in pvr_page_table_l0_sync()
1280 * pvr_page_table_l0_get_raw() - Access the raw equivalent of a mirror level 0
1295 return table->backing_page.host_ptr; in pvr_page_table_l0_get_raw()
1299 * pvr_page_table_l0_get_entry_raw() - Access an entry from the raw equivalent
1320 return &pvr_page_table_l0_get_raw(table)->entries[idx]; in pvr_page_table_l0_get_entry_raw()
1324 * pvr_page_table_l0_entry_is_valid() - Check if a level 0 page table entry is
1343 * struct pvr_mmu_context - context holding data for operations at page
1355 * struct pvr_page_table_ptr - A reference to a single physical page as indexed
1393 * struct pvr_mmu_op_context - context holding data for individual
1394 * device-virtual mapping operations. Intended for use with a VM bind operation.
1404 * this context - these are currently pinned when initialising
1409 /** @sgt_offset: Start address of the device-virtual mapping. */
1460 * pvr_page_table_l2_insert() - Insert an entry referring to a level 1 page
1477 &op_ctx->mmu_ctx->page_table_l2; in pvr_page_table_l2_insert()
1480 op_ctx->curr_page.l2_idx); in pvr_page_table_l2_insert()
1483 child_table->backing_page.dma_addr); in pvr_page_table_l2_insert()
1485 child_table->parent = l2_table; in pvr_page_table_l2_insert()
1486 child_table->parent_idx = op_ctx->curr_page.l2_idx; in pvr_page_table_l2_insert()
1487 l2_table->entries[op_ctx->curr_page.l2_idx] = child_table; in pvr_page_table_l2_insert()
1488 ++l2_table->entry_count; in pvr_page_table_l2_insert()
1489 op_ctx->curr_page.l1_table = child_table; in pvr_page_table_l2_insert()
1493 * pvr_page_table_l2_remove() - Remove a level 1 page table from a level 2 page
1504 &op_ctx->mmu_ctx->page_table_l2; in pvr_page_table_l2_remove()
1507 op_ctx->curr_page.l1_table->parent_idx); in pvr_page_table_l2_remove()
1509 WARN_ON(op_ctx->curr_page.l1_table->parent != l2_table); in pvr_page_table_l2_remove()
1513 l2_table->entries[op_ctx->curr_page.l1_table->parent_idx] = NULL; in pvr_page_table_l2_remove()
1514 op_ctx->curr_page.l1_table->parent_idx = PVR_IDX_INVALID; in pvr_page_table_l2_remove()
1515 op_ctx->curr_page.l1_table->next_free = op_ctx->unmap.l1_free_tables; in pvr_page_table_l2_remove()
1516 op_ctx->unmap.l1_free_tables = op_ctx->curr_page.l1_table; in pvr_page_table_l2_remove()
1517 op_ctx->curr_page.l1_table = NULL; in pvr_page_table_l2_remove()
1519 --l2_table->entry_count; in pvr_page_table_l2_remove()
1523 * pvr_page_table_l1_insert() - Insert an entry referring to a level 0 page
1540 pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l1_table, in pvr_page_table_l1_insert()
1541 op_ctx->curr_page.l1_idx); in pvr_page_table_l1_insert()
1544 child_table->backing_page.dma_addr); in pvr_page_table_l1_insert()
1546 child_table->parent = op_ctx->curr_page.l1_table; in pvr_page_table_l1_insert()
1547 child_table->parent_idx = op_ctx->curr_page.l1_idx; in pvr_page_table_l1_insert()
1548 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx] = child_table; in pvr_page_table_l1_insert()
1549 ++op_ctx->curr_page.l1_table->entry_count; in pvr_page_table_l1_insert()
1550 op_ctx->curr_page.l0_table = child_table; in pvr_page_table_l1_insert()
1554 * pvr_page_table_l1_remove() - Remove a level 0 page table from a level 1 page
1568 pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l0_table->parent, in pvr_page_table_l1_remove()
1569 op_ctx->curr_page.l0_table->parent_idx); in pvr_page_table_l1_remove()
1571 WARN_ON(op_ctx->curr_page.l0_table->parent != in pvr_page_table_l1_remove()
1572 op_ctx->curr_page.l1_table); in pvr_page_table_l1_remove()
1576 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l0_table->parent_idx] = NULL; in pvr_page_table_l1_remove()
1577 op_ctx->curr_page.l0_table->parent_idx = PVR_IDX_INVALID; in pvr_page_table_l1_remove()
1578 op_ctx->curr_page.l0_table->next_free = op_ctx->unmap.l0_free_tables; in pvr_page_table_l1_remove()
1579 op_ctx->unmap.l0_free_tables = op_ctx->curr_page.l0_table; in pvr_page_table_l1_remove()
1580 op_ctx->curr_page.l0_table = NULL; in pvr_page_table_l1_remove()
1582 if (--op_ctx->curr_page.l1_table->entry_count == 0) { in pvr_page_table_l1_remove()
1584 if (op_ctx->curr_page.l1_table->parent_idx != PVR_IDX_INVALID) in pvr_page_table_l1_remove()
1590 * pvr_page_table_l0_insert() - Insert an entry referring to a physical page
1604 pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table, in pvr_page_table_l0_insert()
1605 op_ctx->curr_page.l0_idx); in pvr_page_table_l0_insert()
1610 * There is no entry to set here - we don't keep a mirror of in pvr_page_table_l0_insert()
1614 ++op_ctx->curr_page.l0_table->entry_count; in pvr_page_table_l0_insert()
1618 * pvr_page_table_l0_remove() - Remove a physical page from a level 0 page
1632 pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table, in pvr_page_table_l0_remove()
1633 op_ctx->curr_page.l0_idx); in pvr_page_table_l0_remove()
1638 * There is no entry to clear here - we don't keep a mirror of in pvr_page_table_l0_remove()
1642 if (--op_ctx->curr_page.l0_table->entry_count == 0) { in pvr_page_table_l0_remove()
1644 if (op_ctx->curr_page.l0_table->parent_idx != PVR_IDX_INVALID) in pvr_page_table_l0_remove()
1654 * pvr_page_table_l2_idx() - Calculate the level 2 page table index for a
1655 * device-virtual address.
1656 * @device_addr: Target device-virtual address.
1658 * This function does not perform any bounds checking - it is the caller's
1673 * pvr_page_table_l1_idx() - Calculate the level 1 page table index for a
1674 * device-virtual address.
1675 * @device_addr: Target device-virtual address.
1677 * This function does not perform any bounds checking - it is the caller's
1692 * pvr_page_table_l0_idx() - Calculate the level 0 page table index for a
1693 * device-virtual address.
1694 * @device_addr: Target device-virtual address.
1696 * This function does not perform any bounds checking - it is the caller's
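/*
 * Worked example (illustration only, assuming the 4 KiB device-page layout:
 * 10-bit L2 index in VA bits 39..30, 9-bit L1 index in bits 29..21, 9-bit
 * L0 index in bits 20..12, page offset in bits 11..0). The helpers are
 * hypothetical stand-ins for pvr_page_table_l{2,1,0}_idx().
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int ex_l2_idx(uint64_t va) { return (va >> 30) & 0x3ff; }
static unsigned int ex_l1_idx(uint64_t va) { return (va >> 21) & 0x1ff; }
static unsigned int ex_l0_idx(uint64_t va) { return (va >> 12) & 0x1ff; }

int main(void)
{
	const uint64_t va = 0x00000053d2645000ull;

	/* For 0x53d2645000: l2 = 0x14f, l1 = 0x93, l0 = 0x45, offset = 0. */
	printf("l2=%#x l1=%#x l0=%#x offset=%#llx\n",
	       ex_l2_idx(va), ex_l1_idx(va), ex_l0_idx(va),
	       (unsigned long long)(va & 0xfff));
	return 0;
}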
1711 * DOC: High-level page table operations
1715 * pvr_page_table_l1_get_or_insert() - Retrieves (optionally inserting if
1725 * * -%ENXIO if a level 1 page table would have been inserted.
1735 &op_ctx->mmu_ctx->page_table_l2; in pvr_page_table_l1_get_or_insert()
1739 op_ctx->curr_page.l2_idx)) { in pvr_page_table_l1_get_or_insert()
1740 op_ctx->curr_page.l1_table = in pvr_page_table_l1_get_or_insert()
1741 l2_table->entries[op_ctx->curr_page.l2_idx]; in pvr_page_table_l1_get_or_insert()
1746 return -ENXIO; in pvr_page_table_l1_get_or_insert()
1749 table = op_ctx->map.l1_prealloc_tables; in pvr_page_table_l1_get_or_insert()
1751 return -ENOMEM; in pvr_page_table_l1_get_or_insert()
1754 op_ctx->map.l1_prealloc_tables = table->next_free; in pvr_page_table_l1_get_or_insert()
1755 table->next_free = NULL; in pvr_page_table_l1_get_or_insert()
1766 * pvr_page_table_l0_get_or_insert() - Retrieves (optionally inserting if
1776 * * -%ENXIO if a level 0 page table would have been inserted.
1787 if (pvr_page_table_l1_entry_is_valid(op_ctx->curr_page.l1_table, in pvr_page_table_l0_get_or_insert()
1788 op_ctx->curr_page.l1_idx)) { in pvr_page_table_l0_get_or_insert()
1789 op_ctx->curr_page.l0_table = in pvr_page_table_l0_get_or_insert()
1790 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx]; in pvr_page_table_l0_get_or_insert()
1795 return -ENXIO; in pvr_page_table_l0_get_or_insert()
1798 table = op_ctx->map.l0_prealloc_tables; in pvr_page_table_l0_get_or_insert()
1800 return -ENOMEM; in pvr_page_table_l0_get_or_insert()
1803 op_ctx->map.l0_prealloc_tables = table->next_free; in pvr_page_table_l0_get_or_insert()
1804 table->next_free = NULL; in pvr_page_table_l0_get_or_insert()
1815 * pvr_mmu_context_create() - Create an MMU context.
1820 * * -%ENOMEM if no memory is available,
1829 return ERR_PTR(-ENOMEM); in pvr_mmu_context_create()
1831 err = pvr_page_table_l2_init(&ctx->page_table_l2, pvr_dev); in pvr_mmu_context_create()
1835 ctx->pvr_dev = pvr_dev; in pvr_mmu_context_create()
1841 * pvr_mmu_context_destroy() - Destroy an MMU context.
1846 pvr_page_table_l2_fini(&ctx->page_table_l2); in pvr_mmu_context_destroy()
1851 * pvr_mmu_get_root_table_dma_addr() - Get the DMA address of the root of the
1857 return ctx->page_table_l2.backing_page.dma_addr; in pvr_mmu_get_root_table_dma_addr()
1861  * pvr_page_table_l1_alloc() - Allocate an l1 page_table object.
1866 * * -%ENOMEM if no memory is available,
1878 return ERR_PTR(-ENOMEM); in pvr_page_table_l1_alloc()
1880 err = pvr_page_table_l1_init(table, ctx->pvr_dev); in pvr_page_table_l1_alloc()
1890  * pvr_page_table_l0_alloc() - Allocate an l0 page_table object.
1895 * * -%ENOMEM if no memory is available,
1907 return ERR_PTR(-ENOMEM); in pvr_page_table_l0_alloc()
1909 err = pvr_page_table_l0_init(table, ctx->pvr_dev); in pvr_page_table_l0_alloc()
1919 * pvr_mmu_op_context_require_sync() - Mark an MMU op context as requiring a
1928 if (op_ctx->sync_level_required < level) in pvr_mmu_op_context_require_sync()
1929 op_ctx->sync_level_required = level; in pvr_mmu_op_context_require_sync()
1933 * pvr_mmu_op_context_sync_manual() - Trigger a sync of some or all of the
1940 * value of &op_ctx->sync_level_required as set by
1957 if (op_ctx->curr_page.l0_table) in pvr_mmu_op_context_sync_manual()
1958 pvr_page_table_l0_sync(op_ctx->curr_page.l0_table); in pvr_mmu_op_context_sync_manual()
1963 if (op_ctx->curr_page.l1_table) in pvr_mmu_op_context_sync_manual()
1964 pvr_page_table_l1_sync(op_ctx->curr_page.l1_table); in pvr_mmu_op_context_sync_manual()
1969 pvr_page_table_l2_sync(&op_ctx->mmu_ctx->page_table_l2); in pvr_mmu_op_context_sync_manual()
1973 * pvr_mmu_op_context_sync_partial() - Trigger a sync of some or all of the
1998 if (level >= op_ctx->sync_level_required) { in pvr_mmu_op_context_sync_partial()
1999 level = op_ctx->sync_level_required; in pvr_mmu_op_context_sync_partial()
2000 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE; in pvr_mmu_op_context_sync_partial()
2007 * pvr_mmu_op_context_sync() - Trigger a sync of every page table referenced by
2012 * that subsequent calls to this function will be no-ops unless @op_ctx is
2018 pvr_mmu_op_context_sync_manual(op_ctx, op_ctx->sync_level_required); in pvr_mmu_op_context_sync()
2020 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE; in pvr_mmu_op_context_sync()
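/*
 * Minimal user-space sketch (illustration only) of the sync-level
 * bookkeeping above: operations record the deepest page-table level they
 * dirtied, and a single later sync flushes only the levels actually
 * touched, then resets the marker. Names are invented for the example.
 */
#include <stdio.h>

enum ex_sync_level { EX_SYNC_NONE = -1, EX_SYNC_L0, EX_SYNC_L1, EX_SYNC_L2 };

static enum ex_sync_level ex_required = EX_SYNC_NONE;

static void ex_require_sync(enum ex_sync_level level)
{
	if (ex_required < level)
		ex_required = level;
}

static void ex_sync(void)
{
	/* Child tables are flushed before the parents that point at them. */
	for (int level = EX_SYNC_L0; level <= ex_required; level++)
		printf("flush level %d table\n", level);
	ex_required = EX_SYNC_NONE;
}

int main(void)
{
	ex_require_sync(EX_SYNC_L0);
	ex_require_sync(EX_SYNC_L1);
	ex_sync(); /* flushes L0 and L1 only */
	ex_sync(); /* nothing pending: no output */
	return 0;
}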
2024 * pvr_mmu_op_context_load_tables() - Load pointers to tables in each level of
2043 * @load_level_required >= 1 except -%ENXIO, or
2045 * @load_level_required >= 0 except -%ENXIO.
2053 op_ctx->map.l1_prealloc_tables; in pvr_mmu_op_context_load_tables()
2055 op_ctx->map.l0_prealloc_tables; in pvr_mmu_op_context_load_tables()
2060 op_ctx->curr_page.l1_table = NULL; in pvr_mmu_op_context_load_tables()
2063 op_ctx->curr_page.l0_table = NULL; in pvr_mmu_op_context_load_tables()
2073 * -%ENXIO if @should_create is %false, there is no in pvr_mmu_op_context_load_tables()
2076 if (err == -ENXIO) in pvr_mmu_op_context_load_tables()
2091 * -%ENXIO if @should_create is %false, there is no in pvr_mmu_op_context_load_tables()
2094 if (err == -ENXIO) in pvr_mmu_op_context_load_tables()
2106 if (l1_head_before != op_ctx->map.l1_prealloc_tables) { in pvr_mmu_op_context_load_tables()
2120 if (l1_head_before != op_ctx->map.l1_prealloc_tables) in pvr_mmu_op_context_load_tables()
2122 else if (l0_head_before != op_ctx->map.l0_prealloc_tables) in pvr_mmu_op_context_load_tables()
2129 * pvr_mmu_op_context_set_curr_page() - Reassign the current page of an MMU op
2150 op_ctx->curr_page.l2_idx = pvr_page_table_l2_idx(device_addr); in pvr_mmu_op_context_set_curr_page()
2151 op_ctx->curr_page.l1_idx = pvr_page_table_l1_idx(device_addr); in pvr_mmu_op_context_set_curr_page()
2152 op_ctx->curr_page.l0_idx = pvr_page_table_l0_idx(device_addr); in pvr_mmu_op_context_set_curr_page()
2153 op_ctx->curr_page.l1_table = NULL; in pvr_mmu_op_context_set_curr_page()
2154 op_ctx->curr_page.l0_table = NULL; in pvr_mmu_op_context_set_curr_page()
2161 * pvr_mmu_op_context_next_page() - Advance the current page of an MMU op
2168 * the state of the table references in @op_ctx is valid on return. If -%ENXIO
2170 * noted that @op_ctx as a whole will be left in a valid state if -%ENXIO is
2172 * are invalid by comparing them to %NULL. Only &@ptr->l2_table is guaranteed
2177 * * -%EPERM if the operation would wrap at the top of the page table
2179 * * -%ENXIO if @should_create is %false and a page table of any level would
2190 if (++op_ctx->curr_page.l0_idx != ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X) in pvr_mmu_op_context_next_page()
2193 op_ctx->curr_page.l0_idx = 0; in pvr_mmu_op_context_next_page()
2196 if (++op_ctx->curr_page.l1_idx != ROGUE_MMUCTRL_ENTRIES_PD_VALUE) in pvr_mmu_op_context_next_page()
2199 op_ctx->curr_page.l1_idx = 0; in pvr_mmu_op_context_next_page()
2202 if (++op_ctx->curr_page.l2_idx != ROGUE_MMUCTRL_ENTRIES_PC_VALUE) in pvr_mmu_op_context_next_page()
2206 * If the pattern continued, we would set &op_ctx->curr_page.l2_idx to in pvr_mmu_op_context_next_page()
2214 return -EPERM; in pvr_mmu_op_context_next_page()
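/*
 * Standalone sketch (illustration only) of the ripple-carry advance shown
 * above: increment the L0 index and carry into the L1 and L2 indices when a
 * table boundary is crossed. The entry counts assume 4 KiB device pages
 * (512/512/1024 entries); the real code also reloads the table pointers on
 * every carry.
 */
#include <stdio.h>

struct ex_ptr { unsigned int l2, l1, l0; };

static int ex_next_page(struct ex_ptr *p)
{
	if (++p->l0 != 512)
		return 0;
	p->l0 = 0;

	if (++p->l1 != 512)
		return 0;
	p->l1 = 0;

	if (++p->l2 != 1024)
		return 0;

	return -1; /* would wrap past the top of the address space */
}

int main(void)
{
	struct ex_ptr p = { .l2 = 0, .l1 = 0, .l0 = 511 };

	ex_next_page(&p); /* carries: now l2=0, l1=1, l0=0 */
	printf("l2=%u l1=%u l0=%u\n", p.l2, p.l1, p.l0);
	return 0;
}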
2231 * pvr_page_create() - Create a device-virtual memory page and insert it into
2233 * @op_ctx: Target MMU op context pointing at the device-virtual address of the
2241 * * -%EEXIST if the requested page already exists.
2248 if (pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table, in pvr_page_create()
2249 op_ctx->curr_page.l0_idx)) { in pvr_page_create()
2250 return -EEXIST; in pvr_page_create()
2261 * pvr_page_destroy() - Destroy a device page after removing it from its
2269 if (!pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table, in pvr_page_destroy()
2270 op_ctx->curr_page.l0_idx)) { in pvr_page_destroy()
2281 * pvr_mmu_op_context_destroy() - Destroy an MMU op context.
2287 op_ctx->sync_level_required != PVR_MMU_SYNC_LEVEL_NONE; in pvr_mmu_op_context_destroy()
2292 if (flush_caches && !op_ctx->map.sgt) in pvr_mmu_op_context_destroy()
2293 pvr_mmu_flush_exec(op_ctx->mmu_ctx->pvr_dev, true); in pvr_mmu_op_context_destroy()
2295 while (op_ctx->map.l0_prealloc_tables) { in pvr_mmu_op_context_destroy()
2296 struct pvr_page_table_l0 *tmp = op_ctx->map.l0_prealloc_tables; in pvr_mmu_op_context_destroy()
2298 op_ctx->map.l0_prealloc_tables = in pvr_mmu_op_context_destroy()
2299 op_ctx->map.l0_prealloc_tables->next_free; in pvr_mmu_op_context_destroy()
2303 while (op_ctx->map.l1_prealloc_tables) { in pvr_mmu_op_context_destroy()
2304 struct pvr_page_table_l1 *tmp = op_ctx->map.l1_prealloc_tables; in pvr_mmu_op_context_destroy()
2306 op_ctx->map.l1_prealloc_tables = in pvr_mmu_op_context_destroy()
2307 op_ctx->map.l1_prealloc_tables->next_free; in pvr_mmu_op_context_destroy()
2311 while (op_ctx->unmap.l0_free_tables) { in pvr_mmu_op_context_destroy()
2312 struct pvr_page_table_l0 *tmp = op_ctx->unmap.l0_free_tables; in pvr_mmu_op_context_destroy()
2314 op_ctx->unmap.l0_free_tables = in pvr_mmu_op_context_destroy()
2315 op_ctx->unmap.l0_free_tables->next_free; in pvr_mmu_op_context_destroy()
2319 while (op_ctx->unmap.l1_free_tables) { in pvr_mmu_op_context_destroy()
2320 struct pvr_page_table_l1 *tmp = op_ctx->unmap.l1_free_tables; in pvr_mmu_op_context_destroy()
2322 op_ctx->unmap.l1_free_tables = in pvr_mmu_op_context_destroy()
2323 op_ctx->unmap.l1_free_tables->next_free; in pvr_mmu_op_context_destroy()
2331 * pvr_mmu_op_context_create() - Create an MMU op context.
2334 * @sgt_offset: Start offset of the requested device-virtual memory mapping.
2335 * @size: Size in bytes of the requested device-virtual memory mapping. For an
2340 * * -%ENOMEM if no memory is available,
2353 return ERR_PTR(-ENOMEM); in pvr_mmu_op_context_create()
2355 op_ctx->mmu_ctx = ctx; in pvr_mmu_op_context_create()
2356 op_ctx->map.sgt = sgt; in pvr_mmu_op_context_create()
2357 op_ctx->map.sgt_offset = sgt_offset; in pvr_mmu_op_context_create()
2358 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE; in pvr_mmu_op_context_create()
2370 const u32 l1_count = l1_end_idx - l1_start_idx + 1; in pvr_mmu_op_context_create()
2373 const u32 l0_count = l0_end_idx - l0_start_idx + 1; in pvr_mmu_op_context_create()
2388 l1_tmp->next_free = op_ctx->map.l1_prealloc_tables; in pvr_mmu_op_context_create()
2389 op_ctx->map.l1_prealloc_tables = l1_tmp; in pvr_mmu_op_context_create()
2400 l0_tmp->next_free = op_ctx->map.l0_prealloc_tables; in pvr_mmu_op_context_create()
2401 op_ctx->map.l0_prealloc_tables = l0_tmp; in pvr_mmu_op_context_create()
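/*
 * Condensed sketch (illustration only) of the intrusive singly linked free
 * list used for preallocated tables: pvr_mmu_op_context_create() pushes
 * freshly allocated tables onto map.l{1,0}_prealloc_tables, and
 * pvr_page_table_l{1,0}_get_or_insert() pops one whenever a missing table
 * is needed during a map. The types and helpers here are invented for the
 * example.
 */
#include <stddef.h>

struct ex_table {
	struct ex_table *next_free;
	/* ... backing page, entries, etc. ... */
};

static void ex_push(struct ex_table **head, struct ex_table *table)
{
	table->next_free = *head;
	*head = table;
}

static struct ex_table *ex_pop(struct ex_table **head)
{
	struct ex_table *table = *head;

	if (table) {
		*head = table->next_free;
		table->next_free = NULL;
	}
	return table;
}

int main(void)
{
	static struct ex_table pool[3];
	struct ex_table *prealloc = NULL;

	/* Preallocate up front (as in pvr_mmu_op_context_create())... */
	for (int i = 0; i < 3; i++)
		ex_push(&prealloc, &pool[i]);

	/* ...then consume without allocating while a map is in progress. */
	while (ex_pop(&prealloc))
		;
	return 0;
}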
2414 * pvr_mmu_op_context_unmap_curr_page() - Unmap pages from a memory context
2422 * pvr_mmu_op_context_next_page() (except -%ENXIO).
2439 if (op_ctx->curr_page.l0_table) in pvr_mmu_op_context_unmap_curr_page()
2452 if (err == -ENXIO) in pvr_mmu_op_context_unmap_curr_page()
2464 * pvr_mmu_unmap() - Unmap pages from a memory context.
2466 * @device_addr: First device-virtual address to unmap.
2469 * The total amount of device-virtual memory unmapped is
2489 * pvr_mmu_map_sgl() - Map part of a scatter-gather table entry to
2490 * device-virtual memory.
2493 * @sgl: Target scatter-gather table entry.
2495 * from @sgl which is CPU page-aligned.
2496 * @size: Size of the memory to be mapped in bytes. Must be a non-zero multiple
2498 * @page_flags: Page options to be applied to every device-virtual memory page
2503 * * -%EINVAL if the range specified by @offset and @size is not completely
2520 if (size > dma_len || offset > dma_len - size) in pvr_mmu_map_sgl()
2521 return -EINVAL; in pvr_mmu_map_sgl()
2527 memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy)); in pvr_mmu_map_sgl()
2552 memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page)); in pvr_mmu_map_sgl()
2559 * pvr_mmu_map() - Map an object's virtual memory to physical memory.
2561 * @size: Size of memory to be mapped in bytes. Must be a non-zero multiple
2564 * @device_addr: Virtual device address to map to. Must be device page-aligned.
2585 if ((op_ctx->map.sgt_offset | size) & ~PVR_DEVICE_PAGE_MASK) in pvr_mmu_map()
2586 return -EINVAL; in pvr_mmu_map()
2590 return -EINVAL; in pvr_mmu_map()
2592 memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy)); in pvr_mmu_map()
2599 for_each_sgtable_dma_sg(op_ctx->map.sgt, sgl, count) { in pvr_mmu_map()
2603 if (sgl_len <= op_ctx->map.sgt_offset) { in pvr_mmu_map()
2604 op_ctx->map.sgt_offset -= sgl_len; in pvr_mmu_map()
2608 sgl_offset = op_ctx->map.sgt_offset; in pvr_mmu_map()
2609 map_sgl_len = min_t(u64, sgl_len - sgl_offset, size - mapped_size); in pvr_mmu_map()
2622 op_ctx->map.sgt_offset = 0; in pvr_mmu_map()
2634 memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page)); in pvr_mmu_map()
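/*
 * Standalone sketch (illustration only) of the scatterlist walk in
 * pvr_mmu_map() above: entries that fall entirely before sgt_offset are
 * skipped, the first useful entry is mapped from a partial offset, and
 * every following entry is mapped from offset zero until the requested size
 * is reached. The fixed arrays and names are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct ex_sg { uint64_t dma_addr; uint64_t len; };

static void ex_map(const struct ex_sg *sgl, int count,
		   uint64_t sgt_offset, uint64_t size)
{
	uint64_t mapped = 0;

	for (int i = 0; i < count && mapped < size; i++) {
		uint64_t off, chunk;

		if (sgl[i].len <= sgt_offset) {
			sgt_offset -= sgl[i].len; /* entirely before the start */
			continue;
		}

		off = sgt_offset;
		chunk = sgl[i].len - off;
		if (chunk > size - mapped)
			chunk = size - mapped;

		printf("map %#llx..%#llx\n",
		       (unsigned long long)(sgl[i].dma_addr + off),
		       (unsigned long long)(sgl[i].dma_addr + off + chunk));

		mapped += chunk;
		sgt_offset = 0; /* later entries start at their beginning */
	}
}

int main(void)
{
	const struct ex_sg sgl[] = {
		{ 0x80000000, 0x4000 }, /* skipped: before sgt_offset */
		{ 0x90000000, 0x8000 }, /* partially mapped from 0x2000 */
		{ 0xa0000000, 0x4000 }, /* mapped from its start */
	};

	ex_map(sgl, 3, 0x6000, 0x8000);
	return 0;
}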