/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
#include <subdev/gsp.h>

struct nvkm_vma {
	struct list_head head;
	struct rb_node tree;
	u64 addr;
	u64 size;
	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
	u8   page:3; /* Requested page type (index, or NONE for automatic). */
	u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	bool mapped:1; /* Region contains valid pages. */
	bool no_comp:1; /* Force no memory compression. */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};
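
/*
 * Illustrative note, not from the original header: page/refd hold an index
 * into the MMU's supported page-size table, or NVKM_VMA_PAGE_NONE.  A
 * caller might test for an automatically selected page size like so
 * (sketch only):
 *
 *	if (vma->page == NVKM_VMA_PAGE_NONE)
 *		automatic = true;
 */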

struct nvkm_vmm {
	const struct nvkm_vmm_func *func;
	struct nvkm_mmu *mmu;
	const char *name;
	u32 debug;
	struct kref kref;

	struct {
		struct mutex vmm;
		struct mutex ref;
		struct mutex map;
	} mutex;

	u64 start;
	u64 limit;
	struct {
		struct {
			u64 addr;
			u64 size;
		} p;
		struct {
			u64 addr;
			u64 size;
		} n;
		bool raw;
	} managed;

	struct nvkm_vmm_pt *pd;
	struct list_head join;

	struct list_head list;
	struct rb_root free;
	struct rb_root root;

	bool bootstrapped;
	atomic_t engref[NVKM_SUBDEV_NR];

	dma_addr_t null;
	void *nullp;

	bool replay;

	struct {
		u64 bar2_pdb;

		struct nvkm_gsp_client client;
		struct nvkm_gsp_device device;
		struct nvkm_gsp_object object;

		struct nvkm_vma *rsvd;
		bool external;
	} rm;
};
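
/*
 * Lifetime sketch (hedged): nvkm_vmm embeds a kref, and the ref/unref
 * helpers below manage it.  From the double-pointer signature it appears
 * nvkm_vmm_unref() also clears the caller's pointer (assumption):
 *
 *	struct nvkm_vmm *ref = nvkm_vmm_ref(vmm);
 *	...
 *	nvkm_vmm_unref(&ref);
 */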

int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
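
/*
 * Allocation sketch using the prototypes above.  "vmm" is assumed valid;
 * the page argument is taken here to be a page shift (12 == 4 KiB), which
 * matches common nvkm callers but is an assumption, not a guarantee:
 *
 *	struct nvkm_vma *vma;
 *	int ret = nvkm_vmm_get(vmm, 12, 0x1000, &vma);
 *	if (ret)
 *		return ret;
 *	...
 *	nvkm_vmm_put(vmm, &vma);
 */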

struct nvkm_vmm_map {
	struct nvkm_memory *memory;
	u64 offset;

	struct nvkm_mm_node *mem;
	struct scatterlist *sgl;
	dma_addr_t *dma;
	u64 *pfn;
	u64 off;

	const struct nvkm_vmm_page *page;

	bool no_comp;
	struct nvkm_tags *tags;
	u64 next;
	u64 type;
	u64 ctag;
};

int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
		 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
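
/*
 * Mapping sketch (hedged): a caller fills a struct nvkm_vmm_map describing
 * the backing memory, passing backend-specific arguments via argv/argc.
 * "memory", "argv" and "argc" below are placeholders, not from the source:
 *
 *	struct nvkm_vmm_map map = {
 *		.memory = memory,
 *		.offset = 0,
 *	};
 *	int ret = nvkm_vmm_map(vmm, vma, argv, argc, &map);
 *	...
 *	nvkm_vmm_unmap(vmm, vma);
 */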

struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
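
/*
 * Lookup sketch.  Assumption: these search helpers return ERR_PTR-style
 * values on failure, as is common in nvkm; verify against the
 * implementations before relying on this:
 *
 *	struct nvkm_vmm *uvmm = nvkm_uvmm_search(client, handle);
 *	if (IS_ERR(uvmm))
 *		return PTR_ERR(uvmm);
 */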

struct nvkm_mmu {
	const struct nvkm_mmu_func *func;
	struct nvkm_subdev subdev;

	u8  dma_bits;

	int heap_nr;
	struct {
#define NVKM_MEM_VRAM                                                      0x01
#define NVKM_MEM_HOST                                                      0x02
#define NVKM_MEM_COMP                                                      0x04
#define NVKM_MEM_DISP                                                      0x08
		u8  type;
		u64 size;
	} heap[4];

	int type_nr;
	struct {
#define NVKM_MEM_KIND                                                      0x10
#define NVKM_MEM_MAPPABLE                                                  0x20
#define NVKM_MEM_COHERENT                                                  0x40
#define NVKM_MEM_UNCACHED                                                  0x80
		u8 type;
		u8 heap;
	} type[16];

	struct nvkm_vmm *vmm;

	struct {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp;

	struct mutex mutex; /* serialises mmu invalidations */

	struct nvkm_device_oclass user;
};
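
/*
 * Sketch of scanning the memory-type table (illustrative; assumes each
 * type[] entry's flags combine the heap's location bits with the attribute
 * bits defined above), e.g. to find a CPU-mappable VRAM type:
 *
 *	int i;
 *	for (i = 0; i < mmu->type_nr; i++) {
 *		const u8 need = NVKM_MEM_VRAM | NVKM_MEM_MAPPABLE;
 *		if ((mmu->type[i].type & need) == need)
 *			break;
 *	}
 */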

int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gh100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
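
/*
 * All per-chipset constructors share one signature; a probe-time call
 * sketch (hypothetical call site, single instance assumed):
 *
 *	struct nvkm_mmu *mmu;
 *	int ret = gp100_mmu_new(device, NVKM_SUBDEV_MMU, 0, &mmu);
 */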
#endif