xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c (revision 7f5f518fd70b1b72ca4cf8249ca3306846383ed4)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>

#define NV44_GART_SIZE (512 * 1024 * 1024)
#define NV44_GART_PAGE (  4 * 1024)
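
/* A 512MiB aperture of 4KiB pages gives NV44_GART_SIZE / NV44_GART_PAGE =
 * 131072 PTEs; at 4 bytes per PTE, that works out to the 512KiB page table
 * allocated in nv44_mmu_ctor() below.
 */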

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/
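
/* Four PTEs are packed into each aligned group of four 32-bit words: a PTE
 * is a 27-bit page frame number (physical address >> 12), and the masks and
 * shifts in nv44_vm_fill() below place PTE n at bit offset n * 27 within the
 * 128-bit group.  Bit 30 of the last word (0x40000000) is set whenever a
 * group is written back, which appears to mark the group as valid.
 */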

static void
nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
	     dma_addr_t *list, u32 pte, u32 cnt)
{
	u32 base = (pte << 2) & ~0x0000000f; /* align down to the 16-byte group */
	u32 tmp[4];

	/* read-modify-write, preserving the group's other entries */
	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);

	while (cnt--) {
		/* unmapped entries are pointed at the dummy page */
		u32 addr = list ? (*list++ >> 12) : (null >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}

static void
nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	struct nv04_mmu *mmu = (void *)vma->vm->mmu;
	u32 tmp[4];
	int i;

	/* head: fill up to the next group boundary via read-modify-write */
	if (pte & 3) {
		u32  max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_vm_fill(pgt, mmu->null, list, pte, part);
		pte  += part;
		list += part;
		cnt  -= part;
	}

	/* body: whole groups can be packed and written directly */
	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		nv_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
		cnt -= 4;
	}

	/* tail: any remaining partial group */
	if (cnt)
		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
}
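
/* Unmapping mirrors nv44_vm_map_sg(): partial groups are rewritten through
 * nv44_vm_fill() to point at the dummy page, while whole groups are simply
 * zeroed, leaving the valid bit clear.
 */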

static void
nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
	struct nv04_mmu *mmu = (void *)nvkm_mmu(pgt);

	if (pte & 3) {
		u32  max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
		pte  += part;
		cnt  -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		cnt -= 4;
	}

	if (cnt)
		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
}
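
/* The flush handshake below is inferred from this sequence alone: write the
 * last valid GART address to 0x100814, trigger the flush by writing 0x20 to
 * 0x100808, poll bit 0 of 0x100808 (for up to two seconds) until the engine
 * signals completion, then clear the trigger.
 */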

static void
nv44_vm_flush(struct nvkm_vm *vm)
{
	struct nv04_mmu *mmu = (void *)vm->mmu;
	struct nvkm_device *device = mmu->base.subdev.device;
	nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
	nvkm_wr32(device, 0x100808, 0x00000020);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100808) & 0x00000001)
			break;
	);
	nvkm_wr32(device, 0x100808, 0x00000000);
}

/*******************************************************************************
 * MMU subdev
 ******************************************************************************/

static int
nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nv04_mmu *mmu;
	int ret;

	/* fall back to the nv04 MMU on AGP boards, or when PCIE GART
	 * support has been disabled with the NvPCIE config option
	 */
	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
		return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
					data, size, pobject);
	}

	ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
			      "mmu", &mmu);
	*pobject = nv_object(mmu);
	if (ret)
		return ret;

	mmu->base.create = nv04_vm_create;
	mmu->base.limit = NV44_GART_SIZE;
	mmu->base.dma_bits = 39;
	mmu->base.pgt_bits = 32 - 12;
	mmu->base.spg_shift = 12;
	mmu->base.lpg_shift = 12;
	mmu->base.map_sg = nv44_vm_map_sg;
	mmu->base.unmap = nv44_vm_unmap;
	mmu->base.flush = nv44_vm_flush;

	/* dummy page that unmapped PTEs are pointed at */
	mmu->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &mmu->null);
	if (!mmu->nullp) {
		nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
		mmu->null = 0;
	}

	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096,
			     &mmu->vm);
	if (ret)
		return ret;

	/* one 4-byte PTE per GART page; 512KiB-aligned so that
	 * nv44_mmu_init() can locate the block in vram
	 */
	ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
			      (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
			      512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
			      &mmu->vm->pgt[0].obj[0]);
	mmu->vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	return 0;
}

static int
nv44_mmu_init(struct nvkm_object *object)
{
	struct nv04_mmu *mmu = (void *)object;
	struct nvkm_device *device = mmu->base.subdev.device;
	struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
	u32 addr;
	int ret;

	ret = nvkm_mmu_init(&mmu->base);
	if (ret)
		return ret;

	/* calculate the vram address of this PRAMIN block; the object must
	 * be allocated at 512KiB alignment, and must not exceed a total
	 * size of 512KiB, for this to work correctly
	 */
	addr  = nvkm_rd32(device, 0x10020c);
	addr -= ((gart->addr >> 19) + 1) << 19;
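	/* worked example (hypothetical values): if 0x10020c reads 0x10000000
	 * (256MiB of vram) and gart->addr is 0x00080000, this yields
	 * addr = 0x10000000 - 0x00100000 = 0x0ff00000, i.e. the block sits
	 * that far below the top of vram
	 */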

	nvkm_wr32(device, 0x100850, 0x80000000);
	nvkm_wr32(device, 0x100818, mmu->null);	/* dummy page address */
	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
	nvkm_wr32(device, 0x100850, 0x00008000);
	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
	nvkm_wr32(device, 0x100820, 0x00000000);
	nvkm_wr32(device, 0x10082c, 0x00000001);
	nvkm_wr32(device, 0x100800, addr | 0x00000010);	/* page table base */
	return 0;
}

struct nvkm_oclass
nv44_mmu_oclass = {
	.handle = NV_SUBDEV(MMU, 0x44),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv44_mmu_ctor,
		.dtor = nv04_mmu_dtor,
		.init = nv44_mmu_init,
		.fini = _nvkm_mmu_fini,
	},
};