xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
#include "ram.h"

#include <core/memory.h>
#include <subdev/instmem.h>
#include <subdev/mmu.h>

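/*
 * A VRAM allocation, exposed through the generic nvkm_memory interface.
 * The backing store is described by a list of nvkm_mm_node allocations
 * (or, for nvkm_ram_wrap(), a single bare node created outside the
 * allocator).
 */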
struct nvkm_vram {
	struct nvkm_memory memory;
	struct nvkm_ram *ram;
	u8 page;
	struct nvkm_mm_node *mn;
};

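/* The generic .kmap hook is implemented by wrapping the VRAM in an
 * instmem object.
 */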
static int
nvkm_vram_kmap(struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
	return nvkm_instobj_wrap(nvkm_vram(memory)->ram->fb->subdev.device, memory, pmemory);
}

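/* Describe the allocation's node list to the VMM, which builds the page
 * tables for the given virtual address range.
 */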
static int
nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
	      struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct nvkm_vram *vram = nvkm_vram(memory);
	struct nvkm_vmm_map map = {
		.memory = &vram->memory,
		.offset = offset,
		.mem = vram->mn,
	};

	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

static u64
nvkm_vram_size(struct nvkm_memory *memory)
{
	return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;
}

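/* A physical base address only exists when the allocation is a single
 * contiguous mm node; scattered allocations report ~0ULL instead.
 */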
static u64
nvkm_vram_addr(struct nvkm_memory *memory)
{
	struct nvkm_vram *vram = nvkm_vram(memory);
	if (!nvkm_mm_contiguous(vram->mn))
		return ~0ULL;
	return (u64)nvkm_mm_addr(vram->mn) << NVKM_RAM_MM_SHIFT;
}

static u8
nvkm_vram_page(struct nvkm_memory *memory)
{
	return nvkm_vram(memory)->page;
}

static enum nvkm_memory_target
nvkm_vram_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_VRAM;
}

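/* Nodes handed out by nvkm_mm are linked into the allocator's node list,
 * so their nl_entry.next is non-NULL and they must be returned with
 * nvkm_mm_free() under the ram mutex.  The bare node kzalloc()'d by
 * nvkm_ram_wrap() was never inserted into any list, so it is simply
 * kfree()'d.
 */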
static void *
nvkm_vram_dtor(struct nvkm_memory *memory)
{
	struct nvkm_vram *vram = nvkm_vram(memory);
	struct nvkm_mm_node *next = vram->mn;
	struct nvkm_mm_node *node;

	if (next) {
		if (likely(next->nl_entry.next)) {
			mutex_lock(&vram->ram->mutex);
			while ((node = next)) {
				next = node->next;
				nvkm_mm_free(&vram->ram->vram, &node);
			}
			mutex_unlock(&vram->ram->mutex);
		} else {
			kfree(vram->mn);
		}
	}

	return vram;
}

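/* nvkm_memory backend for VRAM allocations. */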
static const struct nvkm_memory_func
nvkm_vram = {
	.dtor = nvkm_vram_dtor,
	.target = nvkm_vram_target,
	.page = nvkm_vram_page,
	.addr = nvkm_vram_addr,
	.size = nvkm_vram_size,
	.map = nvkm_vram_map,
	.kmap = nvkm_vram_kmap,
};

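/*
 * Wrap an existing physical VRAM range in an nvkm_memory object without
 * going through the allocator.  The range is described by a bare
 * nvkm_mm_node, so the rest of nvkm can treat it like any other VRAM
 * allocation.
 *
 * Usage sketch (the addr and size below are made-up values for
 * illustration, not anything this file defines):
 *
 *	struct nvkm_memory *memory;
 *	int ret = nvkm_ram_wrap(device, 0x100000, 0x10000, &memory);
 *	if (ret == 0)
 *		nvkm_memory_unref(&memory);	// drop when done
 */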
int
nvkm_ram_wrap(struct nvkm_device *device, u64 addr, u64 size,
	      struct nvkm_memory **pmemory)
{
	struct nvkm_ram *ram;
	struct nvkm_vram *vram;

	if (!device->fb || !(ram = device->fb->ram))
		return -ENODEV;

	if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_memory_ctor(&nvkm_vram, &vram->memory);
	vram->ram = ram;
	vram->page = NVKM_RAM_MM_SHIFT;
	*pmemory = &vram->memory;

	vram->mn = kzalloc(sizeof(*vram->mn), GFP_KERNEL);
	if (!vram->mn)
		return -ENOMEM;

	vram->mn->offset = addr >> NVKM_RAM_MM_SHIFT;
	vram->mn->length = size >> NVKM_RAM_MM_SHIFT;
	return 0;
}

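/*
 * Allocate VRAM from the mm.  "rpage" is the requested page shift (clamped
 * to at least NVKM_RAM_MM_SHIFT), "contig" forces a single node, and "back"
 * allocates from the top of the heap instead of the bottom.  Scattered
 * allocations are built up as a chain of nodes until "size" is satisfied.
 *
 * Usage sketch (the type, page and size values below are illustrative
 * assumptions, not taken from this file):
 *
 *	struct nvkm_memory *memory;
 *	int ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 12,
 *			       0x100000, true, false, &memory);
 *	if (ret == 0)
 *		nvkm_memory_unref(&memory);
 */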
int
nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
	     bool contig, bool back, struct nvkm_memory **pmemory)
{
	struct nvkm_ram *ram;
	struct nvkm_mm *mm;
	struct nvkm_mm_node **node, *r;
	struct nvkm_vram *vram;
	u8   page = max(rpage, (u8)NVKM_RAM_MM_SHIFT);
	u32 align = (1 << page) >> NVKM_RAM_MM_SHIFT;
	u32   max = ALIGN(size, 1 << page) >> NVKM_RAM_MM_SHIFT;
	u32   min = contig ? max : align;
	int ret;

	if (!device->fb || !(ram = device->fb->ram))
		return -ENODEV;
	mm = &ram->vram;

	if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_memory_ctor(&nvkm_vram, &vram->memory);
	vram->ram = ram;
	vram->page = page;
	*pmemory = &vram->memory;

	mutex_lock(&ram->mutex);
	node = &vram->mn;
	do {
		if (back)
			ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r);
		else
			ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
		if (ret) {
			mutex_unlock(&ram->mutex);
			nvkm_memory_unref(pmemory);
			return ret;
		}

		*node = r;
		node = &r->next;
		max -= r->length;
	} while (max);
	mutex_unlock(&ram->mutex);
	return 0;
}

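/* Forward to the chip-specific init routine, if one exists. */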
int
nvkm_ram_init(struct nvkm_ram *ram)
{
	if (ram->func->init)
		return ram->func->init(ram);
	return 0;
}

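/* Tear down a ram object: run the chip-specific destructor (which may
 * return a different pointer to free), then release the allocator and
 * the mutex.
 */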
void
nvkm_ram_del(struct nvkm_ram **pram)
{
	struct nvkm_ram *ram = *pram;
	if (ram && !WARN_ON(!ram->func)) {
		if (ram->func->dtor)
			*pram = ram->func->dtor(ram);
		nvkm_mm_fini(&ram->vram);
		mutex_destroy(&ram->mutex);
		kfree(*pram);
		*pram = NULL;
	}
}

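/* Common constructor: record the memory type and size, log them, and
 * initialise the VRAM allocator in NVKM_RAM_MM_SHIFT-sized units unless
 * the caller has already done so.
 */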
int
nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
	      enum nvkm_ram_type type, u64 size, struct nvkm_ram *ram)
{
	static const char *name[] = {
		[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
		[NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
		[NVKM_RAM_TYPE_SGRAM  ] = "SGRAM",
		[NVKM_RAM_TYPE_SDRAM  ] = "SDRAM",
		[NVKM_RAM_TYPE_DDR1   ] = "DDR1",
		[NVKM_RAM_TYPE_DDR2   ] = "DDR2",
		[NVKM_RAM_TYPE_DDR3   ] = "DDR3",
		[NVKM_RAM_TYPE_GDDR2  ] = "GDDR2",
		[NVKM_RAM_TYPE_GDDR3  ] = "GDDR3",
		[NVKM_RAM_TYPE_GDDR4  ] = "GDDR4",
		[NVKM_RAM_TYPE_GDDR5  ] = "GDDR5",
		[NVKM_RAM_TYPE_GDDR5X ] = "GDDR5X",
		[NVKM_RAM_TYPE_GDDR6  ] = "GDDR6",
		[NVKM_RAM_TYPE_HBM2   ] = "HBM2",
	};
	struct nvkm_subdev *subdev = &fb->subdev;
	int ret;

	nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
	ram->func = func;
	ram->fb = fb;
	ram->type = type;
	ram->size = size;
	mutex_init(&ram->mutex);

	if (!nvkm_mm_initialised(&ram->vram)) {
		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
				   size >> NVKM_RAM_MM_SHIFT, 1);
		if (ret)
			return ret;
	}

	return 0;
}

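/* Convenience constructor: allocate a bare nvkm_ram before running the
 * common constructor.
 */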
int
nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
	      enum nvkm_ram_type type, u64 size, struct nvkm_ram **pram)
{
	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_ram_ctor(func, fb, type, size, *pram);
}