xref: /linux/drivers/gpu/drm/v3d/v3d_mmu.c (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

/**
 * DOC: Broadcom V3D MMU
 *
 * The V3D 3.x hardware (compared to VC4) now includes an MMU.  It has
 * a single level of page tables for the V3D's 4GB address space to
 * map to AXI bus addresses, so it can need up to 4MB of physically
 * contiguous memory to store the PTEs (one 4-byte PTE for each 4KB
 * page of the 4GB space).
 *
 * Because the 4MB of contiguous memory for page tables is precious,
 * and switching between page tables is expensive, we load all BOs
 * into the same 4GB address space.
 *
 * To protect clients from each other, we should use the GMP to
 * quickly mask out (at 128KB granularity) which pages are available
 * to each client.  This is not yet implemented.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

/* Note: All PTEs for the 1MB superpage must be filled with the
 * superpage bit set.
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)
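/* The low bits of each 32-bit PTE hold the bus address of the mapped
 * page in 4KB (V3D_MMU_PAGE_SHIFT) units; judging by the BIT(24) check
 * in v3d_mmu_insert_ptes(), that field is 24 bits wide, so the MMU can
 * reach up to a 36-bit AXI address range.
 */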

static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
	int ret;

	/* Make sure that another flush isn't already running when we
	 * start this one.
	 */
	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");

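	/* Start a TLB clear; the CLEARING status bit is polled below to
	 * tell when it has finished.
	 */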
	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
		  V3D_MMU_CTL_TLB_CLEAR);

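	/* Also flush the MMU cache (MMUC), which presumably holds
	 * translations cached in front of the TLB.
	 */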
	V3D_WRITE(V3D_MMUC_CONTROL,
		  V3D_MMUC_CONTROL_FLUSH |
		  V3D_MMUC_CONTROL_ENABLE);

	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret) {
		dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
		return ret;
	}

	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
			 V3D_MMUC_CONTROL_FLUSHING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");

	return ret;
}

int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
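	/* Point the MMU at the page table (as a physical page number)
	 * and have page-table faults both abort the faulting access and
	 * raise an interrupt.
	 */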
	V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
	V3D_WRITE(V3D_MMU_CTL,
		  V3D_MMU_CTL_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ABORT |
		  V3D_MMU_CTL_PT_INVALID_INT |
		  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
		  V3D_MMU_CTL_WRITE_VIOLATION_INT |
		  V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
		  V3D_MMU_CTL_CAP_EXCEEDED_INT);
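	/* The scratch page is installed as the target for illegal or
	 * unmapped accesses, presumably so stray reads and writes land
	 * somewhere harmless.
	 */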
	V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
		  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
		  V3D_MMU_ILLEGAL_ADDR_ENABLE);
	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

	return v3d_mmu_flush_all(v3d);
}

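/* Write PTEs for all of a BO's backing pages into the shared page
 * table.  bo->node is presumably an address range already reserved for
 * this BO in the 4GB V3D address space elsewhere in the driver.
 */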
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
	struct drm_gem_shmem_object *shmem_obj = &bo->base;
	struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
	u32 page = bo->node.start;
	u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
	struct sg_dma_page_iter dma_iter;

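	/* A DMA-mapped CPU page can span several 4KB MMU pages when
	 * PAGE_SIZE is larger than 4KB, so the inner loop writes one PTE
	 * per MMU page that each CPU page covers.
	 */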
	for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
		dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
		u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
		u32 pte = page_prot | page_address;
		u32 i;
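		/* The PTE page-frame field is only 24 bits wide, so bus
		 * addresses at or above 1 << (24 + V3D_MMU_PAGE_SHIFT)
		 * cannot be mapped.
		 */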
		BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
		       BIT(24));
		for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
			v3d->pt[page++] = pte + i;
	}

	WARN_ON_ONCE(page - bo->node.start !=
		     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}

void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
	struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
	u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
	u32 page;

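	/* Zeroing a PTE clears V3D_PTE_VALID, so later accesses to this
	 * range fault instead of hitting a stale mapping.
	 */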
	for (page = bo->node.start; page < bo->node.start + npages; page++)
		v3d->pt[page] = 0;

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}