/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <subdev/mmu.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <core/engine.h>
#include <core/gpuobj.h>

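/*
 * Write the page-directory entry for slot @pde.  A PDE points at either
 * the small-page (4KiB) or the large-page (64KiB) page table for that
 * slot; if neither exists, the entry is left non-present and poisoned
 * with a recognisable 0xdeadcafe pattern.  For present entries, bits
 * 5:6 appear to encode how much of the slot the table actually covers
 * (32/64/128MiB), allowing undersized page tables.
 */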
static void
nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
{
	u64 phys = 0xdeadcafe00000000ULL;
	u32 coverage = 0;

	if (pgt[0]) {
		phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
		coverage = (pgt[0]->size >> 3) << 12;
	} else
	if (pgt[1]) {
		phys = 0x00000001 | pgt[1]->addr; /* present */
		coverage = (pgt[1]->size >> 3) << 16;
	}

	if (phys & 1) {
		if (coverage <= 32 * 1024 * 1024)
			phys |= 0x60;
		else if (coverage <= 64 * 1024 * 1024)
			phys |= 0x40;
		else if (coverage <= 128 * 1024 * 1024)
			phys |= 0x20;
	}

	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
}

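/*
 * Assemble the access-control bits common to every nv50 PTE: bit 0
 * marks the entry present, bit 3 makes it read-only, bits 4:5 select
 * the target (VRAM or system memory), bit 6 requests system-coherent
 * access, and the memory type lands at bit 40 and up.
 */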
static inline u64
vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys |= 1; /* present */
	phys |= (u64)memtype << 40;
	phys |= target << 4;
	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= (1 << 6);
	if (!(vma->access & NV_MEM_ACCESS_WO))
		phys |= (1 << 3);
	return phys;
}

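/*
 * Fill PTEs for a contiguous VRAM mapping.  The range is carved into
 * naturally-aligned power-of-two blocks of 1..128 pages, and each
 * block's size is recorded in bits 7:9 of its PTEs, presumably so the
 * MMU can back a whole block with a single TLB entry.  When the memory
 * is compressed (comp != 0), the tag for each block is derived from
 * the allocation's tag base plus the offset into the mapping.
 */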
static void
nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	    struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	u32 comp = (mem->memtype & 0x180) >> 7;
	u32 block, target;
	int i;

	/* IGPs don't have real VRAM, re-target to stolen system memory */
	target = 0;
	if (nvkm_fb(vma->vm->mmu)->ram->stolen) {
		phys += nvkm_fb(vma->vm->mmu)->ram->stolen;
		target = 3;
	}

	phys  = vm_addr(vma, phys, mem->memtype, target);
	pte <<= 3;
	cnt <<= 3;

	while (cnt) {
		u32 offset_h = upper_32_bits(phys);
		u32 offset_l = lower_32_bits(phys);

		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 3);
			if (cnt >= block && !(pte & (block - 1)))
				break;
		}
		offset_l |= (i << 7);

		phys += block << (vma->node->type - 3);
		cnt  -= block;
		if (comp) {
			u32 tag = mem->tag->offset + ((delta >> 16) * comp);
			offset_h |= (tag << 17);
			delta    += block << (vma->node->type - 3);
		}

		while (block) {
			nv_wo32(pgt, pte + 0, offset_l);
			nv_wo32(pgt, pte + 4, offset_h);
			pte += 8;
			block -= 8;
		}
	}
}

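/*
 * Map a scatter/gather list of system-memory pages, one PTE per DMA
 * address.  The target field selects non-snooped (3) or snooped (2)
 * system memory based on the mapping's access flags.
 */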
static void
nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
	pte <<= 3;
	while (cnt--) {
		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}

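/* Zero @cnt PTEs starting at @pte; a cleared entry has the present bit unset. */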
static void
nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}

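/*
 * Flush the TLBs of every engine that holds a reference to this VM.
 * Engines that expose a dedicated tlb_flush() method (a workaround for
 * a hardware bug, per the comment below) are flushed through it; the
 * rest are flushed via their trigger value in register 0x100c80, with
 * a two-second timeout on completion.
 */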
static void
nv50_vm_flush(struct nvkm_vm *vm)
{
	struct nvkm_mmu *mmu = (void *)vm->mmu;
	struct nvkm_subdev *subdev = &mmu->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_engine *engine;
	int i, vme;

	bar->flush(bar);

	mutex_lock(&subdev->mutex);
	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (!atomic_read(&vm->engref[i]))
			continue;

		/* unfortunate hw bug workaround... */
		engine = nvkm_engine(mmu, i);
		if (engine && engine->tlb_flush) {
			engine->tlb_flush(engine);
			continue;
		}

		switch (i) {
		case NVDEV_ENGINE_GR    : vme = 0x00; break;
		case NVDEV_ENGINE_VP    :
		case NVDEV_ENGINE_MSPDEC: vme = 0x01; break;
		case NVDEV_SUBDEV_BAR   : vme = 0x06; break;
		case NVDEV_ENGINE_MSPPP :
		case NVDEV_ENGINE_MPEG  : vme = 0x08; break;
		case NVDEV_ENGINE_BSP   :
		case NVDEV_ENGINE_MSVLD : vme = 0x09; break;
		case NVDEV_ENGINE_CIPHER:
		case NVDEV_ENGINE_SEC   : vme = 0x0a; break;
		case NVDEV_ENGINE_CE0   : vme = 0x0d; break;
		default:
			continue;
		}

		nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
				break;
		) < 0)
			nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
	}
	mutex_unlock(&subdev->mutex);
}

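/*
 * Each page table spans 1 << (pgt_bits + 12) bytes of virtual address
 * space (512MiB here); clamp the block size so small address spaces
 * don't force the allocation of an oversized page table.
 */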
static int
nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length,
	       u64 mm_offset, struct nvkm_vm **pvm)
{
	u32 block = (1 << (mmu->pgt_bits + 12));
	if (block > length)
		block = length;

	return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm);
}

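/*
 * The nv50 MMU provides a 40-bit virtual address space with 4KiB small
 * pages and 64KiB large pages, one page table per 512MiB of virtual
 * space (pgt_bits = 29 - 12).
 */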
static int
nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nvkm_mmu *mmu;
	int ret;

	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "mmu", &mmu);
	*pobject = nv_object(mmu);
	if (ret)
		return ret;

	mmu->limit = 1ULL << 40;
	mmu->dma_bits = 40;
	mmu->pgt_bits  = 29 - 12;
	mmu->spg_shift = 12;
	mmu->lpg_shift = 16;
	mmu->create = nv50_vm_create;
	mmu->map_pgt = nv50_vm_map_pgt;
	mmu->map = nv50_vm_map;
	mmu->map_sg = nv50_vm_map_sg;
	mmu->unmap = nv50_vm_unmap;
	mmu->flush = nv50_vm_flush;
	return 0;
}

struct nvkm_oclass
nv50_mmu_oclass = {
	.handle = NV_SUBDEV(MMU, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_mmu_ctor,
		.dtor = _nvkm_mmu_dtor,
		.init = _nvkm_mmu_init,
		.fini = _nvkm_mmu_fini,
	},
};