xref: /linux/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c (revision 664b0bae0b87f69bc9deb098f5e0158b9cf18e04)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/gpuobj.h>
#include <core/engine.h>

#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

/* fast-path, where backend is able to provide direct pointer to memory */
static u32
nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return ioread32_native(gpuobj->map + offset);
}

static void
nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	iowrite32_native(data, gpuobj->map + offset);
}
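
/*
 * Note: these fast accessors are used when nvkm_kmap() on the backing
 * object yields a direct CPU pointer; reads and writes then go straight
 * through gpuobj->map.  When no direct mapping is available, the slower
 * variants below fall back to nvkm_ro32()/nvkm_wo32() on the backing
 * memory (or parent) instead.
 */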

/* accessor functions for gpuobjs allocated directly from instmem */
static int
nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
		     struct nvkm_vmm *vmm, struct nvkm_vma *vma,
		     void *argv, u32 argc)
{
	return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
}

static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return nvkm_ro32(gpuobj->memory, offset);
}

static void
nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	nvkm_wo32(gpuobj->memory, offset, data);
}

static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
static void
nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->func = &nvkm_gpuobj_heap;
	nvkm_done(gpuobj->memory);
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_fast = {
	.release = nvkm_gpuobj_heap_release,
	.rd32 = nvkm_gpuobj_rd32_fast,
	.wr32 = nvkm_gpuobj_wr32_fast,
	.map = nvkm_gpuobj_heap_map,
};

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_slow = {
	.release = nvkm_gpuobj_heap_release,
	.rd32 = nvkm_gpuobj_heap_rd32,
	.wr32 = nvkm_gpuobj_heap_wr32,
	.map = nvkm_gpuobj_heap_map,
};

static void *
nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->map = nvkm_kmap(gpuobj->memory);
	if (likely(gpuobj->map))
		gpuobj->func = &nvkm_gpuobj_heap_fast;
	else
		gpuobj->func = &nvkm_gpuobj_heap_slow;
	return gpuobj->map;
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
	.acquire = nvkm_gpuobj_heap_acquire,
	.map = nvkm_gpuobj_heap_map,
};
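
/*
 * The tables above implement a simple state switch: nvkm_kmap() on a
 * gpuobj dispatches to .acquire, which installs either the fast (direct
 * mapping) or slow (indirect) accessor table, and nvkm_done() dispatches
 * to .release, which restores the base table.  The base table has no
 * .rd32/.wr32, so word accesses are only valid between those two calls.
 */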

/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
static int
nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
		struct nvkm_vmm *vmm, struct nvkm_vma *vma,
		void *argv, u32 argc)
{
	return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
			       vmm, vma, argv, argc);
}

static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
	return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
}

static void
nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
	nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
}

static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
static void
nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->func = &nvkm_gpuobj_func;
	nvkm_done(gpuobj->parent);
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_fast = {
	.release = nvkm_gpuobj_release,
	.rd32 = nvkm_gpuobj_rd32_fast,
	.wr32 = nvkm_gpuobj_wr32_fast,
	.map = nvkm_gpuobj_map,
};

static const struct nvkm_gpuobj_func
nvkm_gpuobj_slow = {
	.release = nvkm_gpuobj_release,
	.rd32 = nvkm_gpuobj_rd32,
	.wr32 = nvkm_gpuobj_wr32,
	.map = nvkm_gpuobj_map,
};

static void *
nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
{
	gpuobj->map = nvkm_kmap(gpuobj->parent);
	if (likely(gpuobj->map)) {
		gpuobj->map  = (u8 *)gpuobj->map + gpuobj->node->offset;
		gpuobj->func = &nvkm_gpuobj_fast;
	} else {
		gpuobj->func = &nvkm_gpuobj_slow;
	}
	return gpuobj->map;
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
	.acquire = nvkm_gpuobj_acquire,
	.map = nvkm_gpuobj_map,
};
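
/*
 * For sub-allocated objects every access is redirected to the parent
 * gpuobj, biased by the node->offset this object was given within the
 * parent's heap; the fast path folds that offset into the parent's CPU
 * mapping once, at acquire time.
 */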

static int
nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
{
	u32 offset;
	int ret;

	if (parent) {
		if (align >= 0) {
			ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
					   max(align, 1), &gpuobj->node);
		} else {
			ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
					   -align, &gpuobj->node);
		}
		if (ret)
			return ret;

		gpuobj->parent = parent;
		gpuobj->func = &nvkm_gpuobj_func;
		gpuobj->addr = parent->addr + gpuobj->node->offset;
		gpuobj->size = gpuobj->node->length;

		if (zero) {
			nvkm_kmap(gpuobj);
			for (offset = 0; offset < gpuobj->size; offset += 4)
				nvkm_wo32(gpuobj, offset, 0x00000000);
			nvkm_done(gpuobj);
		}
	} else {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
				      abs(align), zero, &gpuobj->memory);
		if (ret)
			return ret;

		gpuobj->func = &nvkm_gpuobj_heap;
		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
		gpuobj->size = nvkm_memory_size(gpuobj->memory);
	}

	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
}
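
/*
 * A non-negative align allocates from the head of the parent's heap with
 * max(align, 1) byte alignment, while a negative align allocates from the
 * tail with -align alignment (e.g. align = -0x1000 means tail-allocated,
 * 0x1000-byte aligned).  Without a parent, backing memory comes straight
 * from instmem (NVKM_MEM_TARGET_INST).  Either way the new object gets a
 * heap of its own, so it can itself be sub-allocated from.
 */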

void
nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj = *pgpuobj;
	if (gpuobj) {
		if (gpuobj->parent)
			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
		nvkm_mm_fini(&gpuobj->heap);
		nvkm_memory_unref(&gpuobj->memory);
		kfree(*pgpuobj);
		*pgpuobj = NULL;
	}
}

int
nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
		struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj;
	int ret;

	if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
		return -ENOMEM;

	ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
	if (ret)
		nvkm_gpuobj_del(pgpuobj);
	return ret;
}
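
/*
 * Illustrative usage sketch (not part of the original file); the size,
 * alignment and register values below are made up for the example:
 *
 *	struct nvkm_gpuobj *obj = NULL;
 *	int ret = nvkm_gpuobj_new(device, 0x1000, 16, true, NULL, &obj);
 *	if (ret)
 *		return ret;
 *	nvkm_kmap(obj);                   // install fast/slow accessors
 *	nvkm_wo32(obj, 0x00, 0x00000001); // 32-bit writes via obj->func
 *	nvkm_done(obj);                   // restore the base function table
 *	nvkm_gpuobj_del(&obj);            // release node/backing memory
 */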

/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */

int
nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
{
	if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
		return -ENOMEM;

	(*pgpuobj)->addr = nvkm_memory_addr(memory);
	(*pgpuobj)->size = nvkm_memory_size(memory);
	return 0;
}
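
/*
 * Note that the wrapper above only records the address and size of an
 * existing nvkm_memory object: ->func and ->memory stay NULL, so the
 * kmap/rd32/wr32 accessors cannot be used on it, and no reference is
 * taken on the wrapped memory.
 */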

void
nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
		      u32 length)
{
	int i;

	for (i = 0; i < length; i += 4)
		nvkm_wo32(dst, dstoffset + i, *(u32 *)(src + i));
}

void
nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
			u32 length)
{
	int i;

	for (i = 0; i < length; i += 4)
		((u32 *)dst)[i / 4] = nvkm_ro32(src, srcoffset + i);
}
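
/*
 * Both helpers above copy one 32-bit word at a time and assume length is
 * a multiple of four.  They go through nvkm_wo32()/nvkm_ro32() on the
 * gpuobj, so the caller is expected to bracket them with nvkm_kmap() and
 * nvkm_done(); the base function tables provide no rd32/wr32 of their own.
 */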