// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

/**
 * DOC: Broadcom V3D MMU
 *
 * The V3D 3.x hardware (compared to VC4) now includes an MMU.  It has
 * a single level of page tables for the V3D's 4GB address space to
 * map to AXI bus addresses, thus it could need up to 4MB of
 * physically contiguous memory to store the PTEs (one 4-byte PTE for
 * each of the 1M 4KB pages in the address space).
 *
 * Because the 4MB of contiguous memory for page tables is precious,
 * and switching between them is expensive, we load all BOs into the
 * same 4GB address space.
 *
 * To protect clients from each other, we should use the GMP to
 * quickly mask out (at 128KB granularity) which pages are available
 * to each client.  This is not yet implemented.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

/* Note: All PTEs for the 1MB superpage must be filled with the
 * superpage bit set.
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)

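/* Flush the MMU's TLB and the MMU cache (MMUC): wait for any
 * in-flight TLB clear to finish, kick off a new TLB clear and an
 * MMUC flush, then poll until both complete or time out.
 */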
static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
	int ret;

	/* Make sure that another flush isn't already running when we
	 * start this one.
	 */
	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");

	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
		  V3D_MMU_CTL_TLB_CLEAR);

	V3D_WRITE(V3D_MMUC_CONTROL,
		  V3D_MMUC_CONTROL_FLUSH |
		  V3D_MMUC_CONTROL_ENABLE);

	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret) {
		dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
		return ret;
	}

	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
			 V3D_MMUC_CONTROL_FLUSHING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");

	return ret;
}

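/* Point the MMU at the allocated page table and enable translation,
 * with aborts and interrupts raised on invalid-PTE, write-violation
 * and cap-exceeded errors.  Faulting accesses are redirected to the
 * scratch page, and the MMU cache is enabled before flushing so the
 * new table takes effect.
 */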
int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
	V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
	V3D_WRITE(V3D_MMU_CTL,
		  V3D_MMU_CTL_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ABORT |
		  V3D_MMU_CTL_PT_INVALID_INT |
		  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
		  V3D_MMU_CTL_WRITE_VIOLATION_INT |
		  V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
		  V3D_MMU_CTL_CAP_EXCEEDED_INT);
	V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
		  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
		  V3D_MMU_ILLEGAL_ADDR_ENABLE);
	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

	return v3d_mmu_flush_all(v3d);
}

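/* Map a BO into the GPU's address space: write a PTE for each DMA
 * page backing @bo, starting at the page offset reserved in
 * bo->node, then flush so the MMU picks up the new entries.
 */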
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
	struct drm_gem_shmem_object *shmem_obj = &bo->base;
	struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
	u32 page = bo->node.start;
	u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
	struct sg_dma_page_iter dma_iter;

	for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
		dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
		u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
		u32 pte = page_prot | page_address;
		u32 i;

		/* The PTE can only encode a 24-bit physical page
		 * number, so the mapping must stay below that limit.
		 */
		BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
		       BIT(24));
		/* A CPU page may span several MMU pages; write one
		 * PTE for each of them.
		 */
		for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
			v3d->pt[page++] = pte + i;
	}

	WARN_ON_ONCE(page - bo->node.start !=
		     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}

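/* Unmap a BO: zero the PTEs covering its range in the page table and
 * flush so the stale translations are dropped.
 */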
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
	struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
	u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
	u32 page;

	for (page = bo->node.start; page < bo->node.start + npages; page++)
		v3d->pt[page] = 0;

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}