Lines matching refs: op_ctx
1468 * @op_ctx: Target MMU op context pointing at the entry to insert the L1 page
1472 * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
1479 pvr_page_table_l2_insert(struct pvr_mmu_op_context *op_ctx,
1483 &op_ctx->mmu_ctx->page_table_l2;
1486 op_ctx->curr_page.l2_idx);
1492 child_table->parent_idx = op_ctx->curr_page.l2_idx;
1493 l2_table->entries[op_ctx->curr_page.l2_idx] = child_table;
1495 op_ctx->curr_page.l1_table = child_table;
1501 * @op_ctx: Target MMU op context pointing at the L2 entry to remove.
1503 * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
1507 pvr_page_table_l2_remove(struct pvr_mmu_op_context *op_ctx)
1510 &op_ctx->mmu_ctx->page_table_l2;
1513 op_ctx->curr_page.l1_table->parent_idx);
1515 WARN_ON(op_ctx->curr_page.l1_table->parent != l2_table);
1519 l2_table->entries[op_ctx->curr_page.l1_table->parent_idx] = NULL;
1520 op_ctx->curr_page.l1_table->parent_idx = PVR_IDX_INVALID;
1521 op_ctx->curr_page.l1_table->next_free = op_ctx->unmap.l1_free_tables;
1522 op_ctx->unmap.l1_free_tables = op_ctx->curr_page.l1_table;
1523 op_ctx->curr_page.l1_table = NULL;
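
In pvr_page_table_l2_remove() above, the detached L1 table is not freed on the spot: its parent slot is cleared, its parent_idx is invalidated, and the table is pushed onto op_ctx->unmap.l1_free_tables through its own next_free pointer, deferring the actual free until teardown (the destroy fragments further down drain these lists). A minimal userspace sketch of that intrusive deferred-free push, using simplified hypothetical types rather than the driver's real structs:

    #include <stdint.h>
    #include <stddef.h>

    #define IDX_INVALID UINT16_MAX          /* stand-in for PVR_IDX_INVALID */

    struct l1_tbl {                         /* simplified, not the driver's layout */
        struct l1_tbl *next_free;
        uint16_t parent_idx;
    };

    struct unmap_state {
        struct l1_tbl *entries[4];          /* parent (L2) entry slots */
        struct l1_tbl *l1_free_tables;      /* head of the deferred-free list */
    };

    /* Clear the parent slot and park @tbl on the deferred-free list. */
    static void l1_detach_and_defer(struct unmap_state *s, struct l1_tbl *tbl)
    {
        s->entries[tbl->parent_idx] = NULL;
        tbl->parent_idx = IDX_INVALID;
        tbl->next_free = s->l1_free_tables;
        s->l1_free_tables = tbl;
    }

The same push appears again for L0 tables in the pvr_page_table_l1_remove() fragments below.
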
1531 * @op_ctx: Target MMU op context pointing at the entry to insert the L0 page
1535 * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
1542 pvr_page_table_l1_insert(struct pvr_mmu_op_context *op_ctx,
1546 pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l1_table,
1547 op_ctx->curr_page.l1_idx);
1552 child_table->parent = op_ctx->curr_page.l1_table;
1553 child_table->parent_idx = op_ctx->curr_page.l1_idx;
1554 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx] = child_table;
1555 ++op_ctx->curr_page.l1_table->entry_count;
1556 op_ctx->curr_page.l0_table = child_table;
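
The pvr_page_table_l1_insert() fragments keep parent and child in lockstep: the child records its parent and slot (parent, parent_idx), the parent's entries[] slot points back at the child, the parent's entry_count grows, and the op context's cursor (curr_page.l0_table) is switched to the new child. That entry_count is what later drives the removal cascade (see the sketch after the pvr_page_table_l1_remove() fragments). A hedged sketch of the double linkage with made-up types:

    struct child;

    struct parent {
        struct child *entries[8];
        unsigned int entry_count;
    };

    struct child {
        struct parent *parent;
        unsigned int parent_idx;
    };

    /* Link @c into @p at @idx; the caller then points its cursor at @c. */
    static struct child *link_child(struct parent *p, unsigned int idx,
                                    struct child *c)
    {
        c->parent = p;
        c->parent_idx = idx;
        p->entries[idx] = c;
        ++p->entry_count;
        return c;
    }
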
1562 * @op_ctx: Target MMU op context pointing at the L1 entry to remove.
1567 * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
1571 pvr_page_table_l1_remove(struct pvr_mmu_op_context *op_ctx)
1574 pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l0_table->parent,
1575 op_ctx->curr_page.l0_table->parent_idx);
1577 WARN_ON(op_ctx->curr_page.l0_table->parent !=
1578 op_ctx->curr_page.l1_table);
1582 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l0_table->parent_idx] = NULL;
1583 op_ctx->curr_page.l0_table->parent_idx = PVR_IDX_INVALID;
1584 op_ctx->curr_page.l0_table->next_free = op_ctx->unmap.l0_free_tables;
1585 op_ctx->unmap.l0_free_tables = op_ctx->curr_page.l0_table;
1586 op_ctx->curr_page.l0_table = NULL;
1588 if (--op_ctx->curr_page.l1_table->entry_count == 0) {
1590 if (op_ctx->curr_page.l1_table->parent_idx != PVR_IDX_INVALID)
1591 pvr_page_table_l2_remove(op_ctx);
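
The tail of pvr_page_table_l1_remove() is where removals cascade: after the L0 table is detached and parked on the unmap free list, the L1 table's entry_count is decremented, and if it hits zero while the table is still linked upward (parent_idx != PVR_IDX_INVALID) the L1 table itself is removed via pvr_page_table_l2_remove(). A self-contained toy of that occupancy-driven cascade, flattened to one generic table type:

    #include <stdbool.h>
    #include <stdio.h>

    struct tbl {
        struct tbl *parent;
        unsigned int entry_count;
        bool linked;                     /* stands in for parent_idx != PVR_IDX_INVALID */
    };

    /* Remove one entry from @t; if @t empties, unhook it from its parent too. */
    static void tbl_drop_entry(struct tbl *t)
    {
        if (--t->entry_count > 0)
            return;
        printf("table %p emptied\n", (void *)t);
        if (t->linked && t->parent) {
            t->linked = false;
            tbl_drop_entry(t->parent);   /* cascade one level up */
        }
    }

    int main(void)
    {
        struct tbl l1 = { .parent = NULL, .entry_count = 1, .linked = false };
        struct tbl l0 = { .parent = &l1, .entry_count = 1, .linked = true };

        tbl_drop_entry(&l0);             /* empties l0, which then empties l1 */
        return 0;
    }
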
1598 * @op_ctx: Target MMU op context pointing at the L0 entry to insert.
1602 * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
1606 pvr_page_table_l0_insert(struct pvr_mmu_op_context *op_ctx,
1610 pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table,
1611 op_ctx->curr_page.l0_idx);
1620 ++op_ctx->curr_page.l0_table->entry_count;
1626 * @op_ctx: Target MMU op context pointing at the L0 entry to remove.
1631 * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
1635 pvr_page_table_l0_remove(struct pvr_mmu_op_context *op_ctx)
1638 pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table,
1639 op_ctx->curr_page.l0_idx);
1648 if (--op_ctx->curr_page.l0_table->entry_count == 0) {
1650 if (op_ctx->curr_page.l0_table->parent_idx != PVR_IDX_INVALID)
1651 pvr_page_table_l1_remove(op_ctx);
1723 * @op_ctx: Target MMU op context.
1737 pvr_page_table_l1_get_or_insert(struct pvr_mmu_op_context *op_ctx,
1741 &op_ctx->mmu_ctx->page_table_l2;
1745 op_ctx->curr_page.l2_idx)) {
1746 op_ctx->curr_page.l1_table =
1747 l2_table->entries[op_ctx->curr_page.l2_idx];
1755 table = op_ctx->map.l1_prealloc_tables;
1760 op_ctx->map.l1_prealloc_tables = table->next_free;
1766 pvr_page_table_l2_insert(op_ctx, table);
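
pvr_page_table_l1_get_or_insert() is the lookup half of the map path: if the L2 entry is already valid the existing L1 table is reused, otherwise one is popped from op_ctx->map.l1_prealloc_tables and handed to pvr_page_table_l2_insert(). Since the pool is filled when the op context is created (see the pvr_mmu_op_context_create() fragments below), the mapping hot path never allocates and so cannot fail with -ENOMEM halfway through. A rough sketch of the pattern with illustrative names:

    #include <stddef.h>

    struct tbl {
        struct tbl *next_free;
    };

    struct level_state {
        struct tbl *entries[8];          /* parent-level slots */
        struct tbl *prealloc_tables;     /* pool filled when the op context is built */
    };

    /* Return the table at @idx, inserting one from the pool if allowed. */
    static struct tbl *get_or_insert(struct level_state *s, unsigned int idx,
                                     int should_insert)
    {
        struct tbl *t = s->entries[idx];

        if (t)
            return t;                    /* fast path: already present */
        if (!should_insert || !s->prealloc_tables)
            return NULL;                 /* lookup-only, or pool exhausted */

        t = s->prealloc_tables;          /* pop from the preallocated pool */
        s->prealloc_tables = t->next_free;
        t->next_free = NULL;

        s->entries[idx] = t;             /* wire into the parent level */
        return t;
    }
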
1774 * @op_ctx: Target MMU op context.
1788 pvr_page_table_l0_get_or_insert(struct pvr_mmu_op_context *op_ctx,
1793 if (pvr_page_table_l1_entry_is_valid(op_ctx->curr_page.l1_table,
1794 op_ctx->curr_page.l1_idx)) {
1795 op_ctx->curr_page.l0_table =
1796 op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx];
1804 table = op_ctx->map.l0_prealloc_tables;
1809 op_ctx->map.l0_prealloc_tables = table->next_free;
1815 pvr_page_table_l1_insert(op_ctx, table);
1927 * @op_ctx: Target MMU op context.
1931 pvr_mmu_op_context_require_sync(struct pvr_mmu_op_context *op_ctx,
1934 if (op_ctx->sync_level_required < level)
1935 op_ctx->sync_level_required = level;
1941 * @op_ctx: Target MMU op context.
1946 * value of &op_ctx->sync_level_required as set by
1950 pvr_mmu_op_context_sync_manual(struct pvr_mmu_op_context *op_ctx,
1963 if (op_ctx->curr_page.l0_table)
1964 pvr_page_table_l0_sync(op_ctx->curr_page.l0_table);
1969 if (op_ctx->curr_page.l1_table)
1970 pvr_page_table_l1_sync(op_ctx->curr_page.l1_table);
1975 pvr_page_table_l2_sync(&op_ctx->mmu_ctx->page_table_l2);
1981 * @op_ctx: Target MMU op context.
1984 * If @level is greater than the maximum level recorded by @op_ctx as requiring
1988 * recorded by @op_ctx as requiring a sync operation, that maximum level will be
1993 pvr_mmu_op_context_sync_partial(struct pvr_mmu_op_context *op_ctx,
2004 if (level >= op_ctx->sync_level_required) {
2005 level = op_ctx->sync_level_required;
2006 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
2009 pvr_mmu_op_context_sync_manual(op_ctx, level);
2015 * @op_ctx: Target MMU op context.
2018 * that subsequent calls to this function will be no-ops unless @op_ctx is
2022 pvr_mmu_op_context_sync(struct pvr_mmu_op_context *op_ctx)
2024 pvr_mmu_op_context_sync_manual(op_ctx, op_ctx->sync_level_required);
2026 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
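
pvr_mmu_op_context_require_sync(), _sync_partial() and _sync() form a small dirty-level tracker: the op context only remembers the highest page-table level modified so far, a partial sync that covers at least that level is promoted to a full sync and clears the requirement, and a full sync always flushes up to the recorded level and then resets it. A standalone toy model of that state machine (names simplified, not the driver's code):

    #include <stdio.h>

    enum level { LVL_NONE = -1, LVL_0 = 0, LVL_1 = 1, LVL_2 = 2 };

    struct state { enum level required; };

    static void require_sync(struct state *s, enum level l)
    {
        if (s->required < l)
            s->required = l;                /* remember only the maximum */
    }

    static void sync_partial(struct state *s, enum level l)
    {
        if (l >= s->required) {             /* covers everything outstanding */
            l = s->required;
            s->required = LVL_NONE;         /* nothing left to flush later */
        }
        printf("flush levels 0..%d\n", l);  /* stand-in for the manual sync */
    }

    int main(void)
    {
        struct state s = { .required = LVL_NONE };

        require_sync(&s, LVL_1);
        sync_partial(&s, LVL_0);   /* flushes level 0 only; level 1 stays pending */
        sync_partial(&s, LVL_2);   /* covers the pending level 1; requirement cleared */
        return 0;
    }
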
2033 * @op_ctx: Target MMU op context.
2039 * sync level of @op_ctx as new page tables are created and inserted into their
2054 pvr_mmu_op_context_load_tables(struct pvr_mmu_op_context *op_ctx,
2059 op_ctx->map.l1_prealloc_tables;
2061 op_ctx->map.l0_prealloc_tables;
2066 op_ctx->curr_page.l1_table = NULL;
2069 op_ctx->curr_page.l0_table = NULL;
2073 err = pvr_page_table_l1_get_or_insert(op_ctx, should_create);
2091 err = pvr_page_table_l0_get_or_insert(op_ctx, should_create);
2112 if (l1_head_before != op_ctx->map.l1_prealloc_tables) {
2113 pvr_page_table_l2_remove(op_ctx);
2114 pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
2126 if (l1_head_before != op_ctx->map.l1_prealloc_tables)
2127 pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
2128 else if (l0_head_before != op_ctx->map.l0_prealloc_tables)
2129 pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_1);
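
pvr_mmu_op_context_load_tables() decides both its sync level and its error unwind by watching the preallocated-table list heads: it snapshots l1_head_before and l0_head_before, runs the two get-or-insert steps, and a moved head means a table was consumed, i.e. an entry was written one level further up (so level 2 or level 1 must be flushed) and, if the L0 step then fails, the freshly inserted L1 has to be taken out again. A compact sketch of that head-comparison idiom; the real unwind and the require-sync helper are reduced to comments and a plain field write:

    struct tbl { struct tbl *next_free; };

    struct op {
        struct tbl *l1_table, *l0_table;   /* cursor: tables for the current page */
        struct tbl *l1_pool, *l0_pool;     /* preallocated pools (map path only)  */
        int sync_level_required;           /* 0, 1 or 2; the driver uses an enum  */
    };

    static int load_tables(struct op *op)
    {
        struct tbl *l1_before = op->l1_pool;   /* snapshot both pool heads */
        struct tbl *l0_before = op->l0_pool;

        if (!op->l1_table) {                   /* L1 missing: consume one from the pool */
            if (!(op->l1_table = op->l1_pool))
                return -1;
            op->l1_pool = op->l1_table->next_free;
        }

        if (!op->l0_table) {                   /* L0 missing: consume one from the pool */
            if (!(op->l0_table = op->l0_pool)) {
                /* the driver also unwinds a freshly inserted L1 here */
                return -1;
            }
            op->l0_pool = op->l0_table->next_free;
        }

        /* A moved pool head is the only way a new table can have appeared,
         * so it tells us exactly how far up the tree entries were written. */
        if (l1_before != op->l1_pool)
            op->sync_level_required = 2;       /* new L1 => an L2 entry changed */
        else if (l0_before != op->l0_pool)
            op->sync_level_required = 1;       /* new L0 => an L1 entry changed */
        return 0;
    }
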
2138 * @op_ctx: Target MMU op context.
2151 pvr_mmu_op_context_set_curr_page(struct pvr_mmu_op_context *op_ctx,
2154 pvr_mmu_op_context_sync(op_ctx);
2156 op_ctx->curr_page.l2_idx = pvr_page_table_l2_idx(device_addr);
2157 op_ctx->curr_page.l1_idx = pvr_page_table_l1_idx(device_addr);
2158 op_ctx->curr_page.l0_idx = pvr_page_table_l0_idx(device_addr);
2159 op_ctx->curr_page.l1_table = NULL;
2160 op_ctx->curr_page.l0_table = NULL;
2162 return pvr_mmu_op_context_load_tables(op_ctx, should_create,
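
pvr_mmu_op_context_set_curr_page() repoints the cursor at a new device-virtual address: it syncs whatever was still outstanding, derives the three per-level indices from the address via pvr_page_table_l{2,1,0}_idx(), clears the cached table pointers and lets pvr_mmu_op_context_load_tables() repopulate them. The index derivation is plain shift-and-mask; the sketch below uses assumed shift and width values purely for illustration, since the real ones come from ROGUE_MMUCTRL_* constants that do not appear in this listing:

    #include <stdint.h>

    /* Illustrative layout only: a device-virtual address split into three
     * table indices plus a page offset. The widths below are assumptions,
     * not the hardware's actual values. */
    #define PAGE_SHIFT_X   12u             /* 4 KiB device pages (assumed)   */
    #define L0_IDX_BITS    9u              /* entries per L0 table (assumed) */
    #define L1_IDX_BITS    9u              /* entries per L1 table (assumed) */
    #define L2_IDX_BITS    10u             /* entries per L2 table (assumed) */

    struct curr_page_idx {
        uint16_t l2_idx, l1_idx, l0_idx;
    };

    static struct curr_page_idx decompose(uint64_t device_addr)
    {
        struct curr_page_idx p;

        p.l0_idx = (device_addr >> PAGE_SHIFT_X) & ((1u << L0_IDX_BITS) - 1);
        p.l1_idx = (device_addr >> (PAGE_SHIFT_X + L0_IDX_BITS)) &
                   ((1u << L1_IDX_BITS) - 1);
        p.l2_idx = (device_addr >> (PAGE_SHIFT_X + L0_IDX_BITS + L1_IDX_BITS)) &
                   ((1u << L2_IDX_BITS) - 1);
        return p;
    }
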
2169 * @op_ctx: Target MMU op context.
2174 * the state of the table references in @op_ctx is valid on return. If -%ENXIO
2176 * noted that @op_ctx as a whole will be left in a valid state if -%ENXIO is
2191 pvr_mmu_op_context_next_page(struct pvr_mmu_op_context *op_ctx,
2196 if (++op_ctx->curr_page.l0_idx != ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X)
2199 op_ctx->curr_page.l0_idx = 0;
2202 if (++op_ctx->curr_page.l1_idx != ROGUE_MMUCTRL_ENTRIES_PD_VALUE)
2205 op_ctx->curr_page.l1_idx = 0;
2208 if (++op_ctx->curr_page.l2_idx != ROGUE_MMUCTRL_ENTRIES_PC_VALUE)
2212 * If the pattern continued, we would set &op_ctx->curr_page.l2_idx to
2219 __func__, op_ctx);
2225 pvr_mmu_op_context_sync_partial(op_ctx, load_level_required);
2228 return pvr_mmu_op_context_load_tables(op_ctx, should_create,
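
pvr_mmu_op_context_next_page() advances the cursor like a multi-digit counter: the L0 index is incremented, and only on a wrap does the L1 index move, and only on that wrap does the L2 index move; if the L2 index would wrap as well, the walk has run off the end of the device-virtual address space, so the function warns and bails out instead. After any carry the underlying tables change, which is why the partial sync at the end of the fragment flushes what the previous position dirtied before the next tables are loaded. A toy version of the carry logic (the entry counts are placeholders, not the real ROGUE_MMUCTRL_* values):

    #include <stdbool.h>

    #define N_L0 512u      /* placeholder for ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X */
    #define N_L1 512u      /* placeholder for ROGUE_MMUCTRL_ENTRIES_PD_VALUE   */
    #define N_L2 1024u     /* placeholder for ROGUE_MMUCTRL_ENTRIES_PC_VALUE   */

    struct cursor { unsigned int l2_idx, l1_idx, l0_idx; };

    /* Advance one device page; false once the address space is exhausted. */
    static bool cursor_next_page(struct cursor *c)
    {
        if (++c->l0_idx != N_L0)
            return true;               /* same L0 table, nothing else moves */
        c->l0_idx = 0;

        if (++c->l1_idx != N_L1)
            return true;               /* new L0 table under the same L1 */
        c->l1_idx = 0;

        if (++c->l2_idx != N_L2)
            return true;               /* new L1 table under the same L2 */

        /* no higher level to carry into: there is no next page */
        return false;
    }
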
2239 * @op_ctx: Target MMU op context pointing at the device-virtual address of the
2250 pvr_page_create(struct pvr_mmu_op_context *op_ctx, dma_addr_t dma_addr,
2254 if (pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table,
2255 op_ctx->curr_page.l0_idx)) {
2259 pvr_page_table_l0_insert(op_ctx, dma_addr, flags);
2261 pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
2269 * @op_ctx: Target MMU op context.
2272 pvr_page_destroy(struct pvr_mmu_op_context *op_ctx)
2275 if (!pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table,
2276 op_ctx->curr_page.l0_idx)) {
2281 pvr_page_table_l0_remove(op_ctx);
2283 pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
2288 * @op_ctx: Target MMU op context.
2290 void pvr_mmu_op_context_destroy(struct pvr_mmu_op_context *op_ctx)
2293 op_ctx->sync_level_required != PVR_MMU_SYNC_LEVEL_NONE;
2295 pvr_mmu_op_context_sync(op_ctx);
2298 if (flush_caches && !op_ctx->map.sgt)
2299 pvr_mmu_flush_exec(op_ctx->mmu_ctx->pvr_dev, true);
2301 while (op_ctx->map.l0_prealloc_tables) {
2302 struct pvr_page_table_l0 *tmp = op_ctx->map.l0_prealloc_tables;
2304 op_ctx->map.l0_prealloc_tables =
2305 op_ctx->map.l0_prealloc_tables->next_free;
2309 while (op_ctx->map.l1_prealloc_tables) {
2310 struct pvr_page_table_l1 *tmp = op_ctx->map.l1_prealloc_tables;
2312 op_ctx->map.l1_prealloc_tables =
2313 op_ctx->map.l1_prealloc_tables->next_free;
2317 while (op_ctx->unmap.l0_free_tables) {
2318 struct pvr_page_table_l0 *tmp = op_ctx->unmap.l0_free_tables;
2320 op_ctx->unmap.l0_free_tables =
2321 op_ctx->unmap.l0_free_tables->next_free;
2325 while (op_ctx->unmap.l1_free_tables) {
2326 struct pvr_page_table_l1 *tmp = op_ctx->unmap.l1_free_tables;
2328 op_ctx->unmap.l1_free_tables =
2329 op_ctx->unmap.l1_free_tables->next_free;
2333 kfree(op_ctx);
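
pvr_mmu_op_context_destroy() is the teardown counterpart of the list juggling above: it performs any outstanding sync, issues a final MMU cache flush when something was dirtied and no sgt is attached (i.e. the context was used for unmapping), and then drains the four singly linked lists, map.l{0,1}_prealloc_tables and unmap.l{0,1}_free_tables, one table per iteration. The drain itself is the usual pop-head-and-free idiom; a hedged sketch with a generic node type:

    #include <stdlib.h>

    struct tbl {
        struct tbl *next_free;
        /* ... backing page-table memory etc. in the real structure ... */
    };

    /* Free every table chained through next_free and leave the list empty. */
    static void drain_table_list(struct tbl **head)
    {
        while (*head) {
            struct tbl *tmp = *head;

            *head = (*head)->next_free;
            free(tmp);                 /* the driver calls its per-level free helper */
        }
    }
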
2355 struct pvr_mmu_op_context *op_ctx =
2356 kzalloc(sizeof(*op_ctx), GFP_KERNEL);
2358 if (!op_ctx)
2361 op_ctx->mmu_ctx = ctx;
2362 op_ctx->map.sgt = sgt;
2363 op_ctx->map.sgt_offset = sgt_offset;
2364 op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
2394 l1_tmp->next_free = op_ctx->map.l1_prealloc_tables;
2395 op_ctx->map.l1_prealloc_tables = l1_tmp;
2406 l0_tmp->next_free = op_ctx->map.l0_prealloc_tables;
2407 op_ctx->map.l0_prealloc_tables = l0_tmp;
2411 return op_ctx;
2414 pvr_mmu_op_context_destroy(op_ctx);
2422 * @op_ctx: Target MMU op context pointing at the first page to unmap.
2427 * * Any error encountered while advancing @op_ctx.curr_page with
2431 pvr_mmu_op_context_unmap_curr_page(struct pvr_mmu_op_context *op_ctx,
2442 * @op_ctx.curr_page is %NULL, there cannot be a mapped page at
2443 * @op_ctx.curr_page (so skip ahead).
2445 if (op_ctx->curr_page.l0_table)
2446 pvr_page_destroy(op_ctx);
2449 err = pvr_mmu_op_context_next_page(op_ctx, false);
2451 * If the page table tree structure at @op_ctx.curr_page is
2463 pvr_page_destroy(op_ctx);
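
pvr_mmu_op_context_unmap_curr_page() walks nr_pages forward from the cursor, destroying a PTE only where curr_page.l0_table is non-NULL (a NULL L0 table means the range is already a hole, so there is nothing to tear down), and advances with should_create set to false so that missing tables surface as -ENXIO, per the kernel-doc fragments around file line 2174, instead of being built just to be emptied again. A rough standalone loop with stand-in helpers showing that skip-the-holes shape:

    #include <stdint.h>
    #include <stddef.h>

    #define ENXIO_SENTINEL (-6)         /* stand-in for -ENXIO from the real next-page step */

    struct cursor { void *l0_table; };  /* NULL here means "no L0 table loaded" */

    /* Hypothetical stand-ins: advance without creating tables, and clear a PTE. */
    static int cursor_next(struct cursor *c)
    {
        c->l0_table = NULL;             /* pretend every following page is a hole */
        return ENXIO_SENTINEL;
    }

    static void pte_destroy(struct cursor *c) { (void)c; }

    static int unmap_from_cursor(struct cursor *c, uint64_t nr_pages)
    {
        for (uint64_t page = 0; page < nr_pages; page++) {
            /* only tear down a PTE when an L0 table is actually present */
            if (c->l0_table)
                pte_destroy(c);

            if (page + 1 < nr_pages) {
                int err = cursor_next(c);

                /* ENXIO just means "no tables at the next page", which is
                 * fine while unmapping; any other error aborts the walk */
                if (err && err != ENXIO_SENTINEL)
                    return err;
            }
        }
        return 0;
    }
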
2471 * @op_ctx: Target MMU op context.
2483 int pvr_mmu_unmap(struct pvr_mmu_op_context *op_ctx, u64 device_addr, u64 size)
2485 int err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, false);
2490 return pvr_mmu_op_context_unmap_curr_page(op_ctx,
2497 * @op_ctx: Target MMU op context pointing to the first page that should be
2512 * * Any error encountered while advancing @op_ctx.curr_page with
2516 pvr_mmu_map_sgl(struct pvr_mmu_op_context *op_ctx, struct scatterlist *sgl,
2533 memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy));
2539 err = pvr_page_create(op_ctx, dma_addr, page_flags);
2544 err = pvr_mmu_op_context_next_page(op_ctx, true);
2550 err = pvr_page_create(op_ctx, dma_addr, page_flags);
2558 memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page));
2559 err = pvr_mmu_op_context_unmap_curr_page(op_ctx, page);
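
pvr_mmu_map_sgl() shows the error-handling strategy of the whole map path: the cursor is snapshotted up front (memcpy of op_ctx->curr_page into ptr_copy); if any page fails to map, the snapshot is copied back so the cursor again points at the first page of the attempted range, and pvr_mmu_op_context_unmap_curr_page() tears down exactly the pages created so far. A hedged, self-contained illustration of that snapshot-and-rewind shape:

    #include <string.h>

    struct cursor { unsigned int l2_idx, l1_idx, l0_idx; };

    /* Hypothetical per-page helpers for the sketch only. */
    static int map_one(unsigned int page, unsigned int fail_at)
    {
        return page == fail_at ? -1 : 0;    /* simulate a failure at @fail_at */
    }

    static void unmap_range(struct cursor *start, unsigned int pages)
    {
        (void)start;
        (void)pages;                        /* would destroy @pages PTEs from @start */
    }

    static void cursor_advance(struct cursor *c) { ++c->l0_idx; }

    static int map_range(struct cursor *c, unsigned int pages, unsigned int fail_at)
    {
        struct cursor saved;
        unsigned int page;
        int err = 0;

        memcpy(&saved, c, sizeof(saved));   /* snapshot the cursor */

        for (page = 0; page < pages; page++) {
            err = map_one(page, fail_at);
            if (err)
                break;
            if (page + 1 < pages)
                cursor_advance(c);
        }

        if (err) {
            memcpy(c, &saved, sizeof(*c));  /* rewind to the first page... */
            unmap_range(c, page);           /* ...and undo what was created */
        }
        return err;
    }
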
2566 * @op_ctx: Target MMU op context.
2578 int pvr_mmu_map(struct pvr_mmu_op_context *op_ctx, u64 size, u64 flags,
2591 if ((op_ctx->map.sgt_offset | size) & ~PVR_DEVICE_PAGE_MASK)
2594 err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, true);
2598 memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy));
2605 for_each_sgtable_dma_sg(op_ctx->map.sgt, sgl, count) {
2609 if (sgl_len <= op_ctx->map.sgt_offset) {
2610 op_ctx->map.sgt_offset -= sgl_len;
2614 sgl_offset = op_ctx->map.sgt_offset;
2617 err = pvr_mmu_map_sgl(op_ctx, sgl, sgl_offset, map_sgl_len,
2626 pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
2628 op_ctx->map.sgt_offset = 0;
2634 err = pvr_mmu_op_context_next_page(op_ctx, true);
2640 memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page));
2641 pvr_mmu_op_context_unmap_curr_page(op_ctx,
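
Finally, the pvr_mmu_map() fragments show how map.sgt_offset is consumed while iterating the scatter-gather table: segments that lie entirely inside the offset are skipped and the offset is reduced by their length, the first segment that straddles it is mapped starting at sgl_offset = sgt_offset, and once a segment has been partially consumed the stored offset is zeroed so every later segment maps from its beginning. A standalone sketch of that skip-then-trim loop, with a plain array standing in for the sg list:

    #include <stdint.h>
    #include <stdio.h>

    struct seg { uint64_t dma_addr, len; };   /* stand-in for one sg entry */

    /* Map @size bytes starting @offset bytes into the segment list. */
    static void map_with_offset(const struct seg *segs, unsigned int nsegs,
                                uint64_t offset, uint64_t size)
    {
        for (unsigned int i = 0; i < nsegs && size; i++) {
            uint64_t seg_len = segs[i].len;
            uint64_t seg_off = 0;
            uint64_t chunk;

            if (seg_len <= offset) {          /* whole segment before the window */
                offset -= seg_len;
                continue;
            }

            if (offset) {                     /* first useful segment: trim its head */
                seg_off = offset;
                offset = 0;
            }

            chunk = seg_len - seg_off;
            if (chunk > size)
                chunk = size;                 /* never map past the requested size */

            printf("map %llu bytes at dma %#llx\n",
                   (unsigned long long)chunk,
                   (unsigned long long)(segs[i].dma_addr + seg_off));
            size -= chunk;
        }
    }

    int main(void)
    {
        const struct seg segs[] = {
            { 0x1000, 0x4000 }, { 0x8000, 0x4000 }, { 0x10000, 0x4000 },
        };

        /* skip segment 0 entirely, start 0x1000 bytes into segment 1 */
        map_with_offset(segs, 3, 0x5000, 0x6000);
        return 0;
    }
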