/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>

struct nvkm_vma {
	struct list_head head;
	struct rb_node tree;
	u64 addr;
	u64 size:50;
	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
	u8   page:3; /* Requested page type (index, or NONE for automatic). */
	u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	bool mapped:1; /* Region contains valid pages. */
	bool no_comp:1; /* Force no memory compression. */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};
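
/*
 * Illustrative sketch (not part of the in-tree API): how the state bits
 * above are commonly read together.  A region carved out by nvkm_vmm_get()
 * has used=1; once nvkm_vmm_map() succeeds, mapped=1 and memory/refd become
 * valid.
 *
 *	static bool example_vma_is_free(const struct nvkm_vma *vma)
 *	{
 *		return !vma->used && !vma->mapped &&
 *		       vma->refd == NVKM_VMA_PAGE_NONE;
 *	}
 */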

struct nvkm_vmm {
	const struct nvkm_vmm_func *func;
	struct nvkm_mmu *mmu;
	const char *name;
	u32 debug; /* Debug message level. */
	struct kref kref;

	struct {
		struct mutex vmm;
		struct mutex ref;
		struct mutex map;
	} mutex;

	u64 start; /* Base of the virtual address space. */
	u64 limit; /* End of the virtual address space. */
	struct {
		struct {
			u64 addr;
			u64 size;
		} p; /* Kernel-managed area below the client-managed window. */
		struct {
			u64 addr;
			u64 size;
		} n; /* Kernel-managed area above the client-managed window. */
		bool raw; /* Client manages PTs directly. */
	} managed;

	struct nvkm_vmm_pt *pd; /* Root page directory. */
	struct list_head join; /* Instance blocks joined to this VMM. */

	struct list_head list; /* All regions, in address order. */
	struct rb_root free; /* Unallocated regions, for nvkm_vmm_get(). */
	struct rb_root root; /* Allocated regions, keyed by address. */

	bool bootstrapped; /* Set once nvkm_vmm_boot() has run. */
	atomic_t engref[NVKM_SUBDEV_NR]; /* Per-engine usage counts. */

	dma_addr_t null;
	void *nullp;

	bool replay;
};
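
/*
 * Illustrative sketch (assumed from the field layout above, not a stable
 * API): "list" tracks every region in address order, "free" indexes
 * unallocated regions for nvkm_vmm_get(), and "root" indexes allocated
 * regions by address, so finding the nvkm_vma covering an address walks
 * "root":
 *
 *	static struct nvkm_vma *example_vma_find(struct nvkm_vmm *vmm, u64 addr)
 *	{
 *		struct rb_node *node = vmm->root.rb_node;
 *		while (node) {
 *			struct nvkm_vma *vma =
 *				rb_entry(node, struct nvkm_vma, tree);
 *			if (addr < vma->addr)
 *				node = node->rb_left;
 *			else if (addr >= vma->addr + vma->size)
 *				node = node->rb_right;
 *			else
 *				return vma;
 *		}
 *		return NULL;
 *	}
 */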

int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
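
/*
 * Typical address-space lifecycle, as a hedged sketch (error handling
 * elided; "device" is an existing struct nvkm_device *, "page" is passed
 * as a log2 page shift by in-tree callers, and argv/argc may be NULL/0
 * for default per-chipset arguments):
 *
 *	static struct lock_class_key key;
 *	struct nvkm_vmm *vmm;
 *	struct nvkm_vma *vma;
 *	int ret;
 *
 *	ret = nvkm_vmm_new(device, 0, 1ULL << 40, NULL, 0, &key,
 *			   "example", &vmm);
 *	if (ret == 0)
 *		ret = nvkm_vmm_get(vmm, 12, 0x10000, &vma);
 *
 *	nvkm_vmm_put(vmm, &vma);
 *	nvkm_vmm_unref(&vmm);
 */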

struct nvkm_vmm_map {
	struct nvkm_memory *memory;
	u64 offset;

	struct nvkm_mm_node *mem;
	struct scatterlist *sgl;
	dma_addr_t *dma;
	u64 *pfn;
	u64 off;

	const struct nvkm_vmm_page *page;

	bool no_comp;
	struct nvkm_tags *tags;
	u64 next;
	u64 type;
	u64 ctag;
};
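
/*
 * Hedged notes on the structure above (inferred, not normative): the
 * caller provides "memory" plus an offset; at most one of mem/sgl/dma/pfn
 * ends up describing the backing pages, and page/tags/type/ctag are
 * filled in as the map proceeds.  A minimal caller-side setup might look
 * like:
 *
 *	struct nvkm_vmm_map map = {
 *		.memory = memory,
 *		.offset = 0,
 *	};
 */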

int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
		 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
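
/*
 * Illustrative pairing of the calls above (assumes "vma" from
 * nvkm_vmm_get() and "map" prepared as sketched earlier; argv/argc carry
 * a per-chipset argument structure):
 *
 *	ret = nvkm_vmm_map(vmm, vma, argv, argc, &map);
 *	if (ret == 0) {
 *		...
 *		nvkm_vmm_unmap(vmm, vma);
 *	}
 */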

struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);

struct nvkm_mmu {
	const struct nvkm_mmu_func *func;
	struct nvkm_subdev subdev;

	u8  dma_bits;

	int heap_nr;
	struct {
#define NVKM_MEM_VRAM                                                      0x01
#define NVKM_MEM_HOST                                                      0x02
#define NVKM_MEM_COMP                                                      0x04
#define NVKM_MEM_DISP                                                      0x08
		u8  type;
		u64 size;
	} heap[4];

	int type_nr;
	struct {
#define NVKM_MEM_KIND                                                      0x10
#define NVKM_MEM_MAPPABLE                                                  0x20
#define NVKM_MEM_COHERENT                                                  0x40
#define NVKM_MEM_UNCACHED                                                  0x80
		u8 type;
		u8 heap;
	} type[16];

	struct nvkm_vmm *vmm;

	struct {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp;

	struct mutex mutex; /* serialises mmu invalidations */

	struct nvkm_device_oclass user;
};
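
/*
 * Illustrative sketch (not an in-tree helper): heap[] enumerates physical
 * heaps and their sizes, while type[] pairs a heap index with NVKM_MEM_*
 * attribute flags.  Probing for a host, cache-coherent memory type:
 *
 *	static int example_coherent_type(struct nvkm_mmu *mmu)
 *	{
 *		int i;
 *		for (i = 0; i < mmu->type_nr; i++) {
 *			const u8 type = mmu->type[i].type;
 *			if ((type & NVKM_MEM_HOST) &&
 *			    (type & NVKM_MEM_COHERENT))
 *				return i;
 *		}
 *		return -1;
 *	}
 */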

int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif