xref: /linux/drivers/gpu/drm/gma500/gem.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  psb GEM interface
 *
 * Copyright (c) 2011, Intel Corporation.
 *
 * Authors: Alan Cox
 *
 * TODO:
 *	-	we need to work out if the MMU is relevant (eg for
 *		accelerated operations on a GEM object)
 */

#include <linux/pagemap.h>

#include <asm/set_memory.h>

#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "gem.h"
#include "psb_drv.h"

/*
 * PSB GEM object
 */

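/*
 * Pin @pobj into the GTT. On the first pin of a shmem-backed object the
 * backing pages are allocated, switched to write-combining and mapped
 * through both the GTT and the GPU MMU; stolen-memory objects are already
 * resident and only have their pin count raised.
 */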
int psb_gem_pin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	struct page **pages;
	unsigned int npages;
	int ret;

	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
		return ret;

	if (pobj->in_gart || pobj->stolen)
		goto out; /* already mapped */

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto err_dma_resv_unlock;
	}

	npages = obj->size / PAGE_SIZE;

	set_pages_array_wc(pages, npages);

	psb_gtt_insert_pages(dev_priv, &pobj->resource, pages);
	psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages,
			     (gpu_base + pobj->offset), npages, 0, 0,
			     PSB_MMU_CACHED_MEMORY);

	pobj->pages = pages;

out:
	++pobj->in_gart;
	dma_resv_unlock(obj->resv);

	return 0;

err_dma_resv_unlock:
	dma_resv_unlock(obj->resv);
	return ret;
}

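/*
 * Drop one pin reference on @pobj. When the last reference is gone the
 * object is unmapped from the GPU MMU and the GTT, its pages are switched
 * back to write-back caching and released. Stolen-memory objects keep
 * their mapping and backing store.
 */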
void psb_gem_unpin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	unsigned long npages;
	int ret;

	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
		return;

	WARN_ON(!pobj->in_gart);

	--pobj->in_gart;

	if (pobj->in_gart || pobj->stolen)
		goto out;

	npages = obj->size / PAGE_SIZE;

	psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
			     (gpu_base + pobj->offset), npages, 0, 0);
	psb_gtt_remove_pages(dev_priv, &pobj->resource);

	/* Reset caching flags */
	set_pages_array_wb(pobj->pages, npages);

	drm_gem_put_pages(obj, pobj->pages, true, false);
	pobj->pages = NULL;

out:
	dma_resv_unlock(obj->resv);
}

static vm_fault_t psb_gem_fault(struct vm_fault *vmf);

static void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct psb_gem_object *pobj = to_psb_gem_object(obj);

	/* Undo the mmap pin if we are destroying the object */
	if (pobj->mmapping)
		psb_gem_unpin(pobj);

	drm_gem_object_release(obj);

	WARN_ON(pobj->in_gart && !pobj->stolen);

	release_resource(&pobj->resource);
	kfree(pobj);
}

static const struct vm_operations_struct psb_gem_vm_ops = {
	.fault = psb_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs psb_gem_object_funcs = {
	.free = psb_gem_free_object,
	.vm_ops = &psb_gem_vm_ops,
};

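/*
 * Allocate a GEM object of @size bytes (rounded up to the page size)
 * together with a GTT range of alignment @align. With @stolen set, the
 * object is backed by stolen memory and is resident from the start;
 * otherwise it is shmem-backed, restricted to 32-bit addresses, and its
 * pages are only allocated on the first pin.
 */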
struct psb_gem_object *
psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	int ret;

	size = roundup(size, PAGE_SIZE);

	pobj = kzalloc(sizeof(*pobj), GFP_KERNEL);
	if (!pobj)
		return ERR_PTR(-ENOMEM);
	obj = &pobj->base;

	/* GTT resource */

	ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen,
					&pobj->offset);
	if (ret)
		goto err_kfree;

	if (stolen) {
		pobj->stolen = true;
		pobj->in_gart = 1;
	}

	/* GEM object */

	obj->funcs = &psb_gem_object_funcs;

	if (stolen) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_release_resource;

		/* Limit the object to 32-bit mappings */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
	}

	return pobj;

err_release_resource:
	release_resource(&pobj->resource);
err_kfree:
	kfree(pobj);
	return ERR_PTR(ret);
}

/**
 *	psb_gem_dumb_create	-	create a dumb buffer
 *	@file: our client file
 *	@dev: our device
 *	@args: the requested arguments copied from userspace
 *
 *	Allocate a buffer suitable for use as a frame buffer of the
 *	form described by user space. Give userspace a handle by which
 *	to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	pitch = ALIGN(pitch, 64);

	size = pitch * args->height;
	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
	if (IS_ERR(pobj))
		return PTR_ERR(pobj);
	obj = &pobj->base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret)
		goto err_drm_gem_object_put;

	drm_gem_object_put(obj);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}

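/*
 * For reference, a rough sketch (not part of this driver) of the userspace
 * side of this path: the generic dumb-buffer ioctl that ends up in
 * psb_gem_dumb_create(). It assumes a DRM file descriptor 'fd' opened on
 * the gma500 device and libdrm's drmIoctl() wrapper:
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width  = 1024,
 *		.height = 768,
 *		.bpp    = 32,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	On success, creq.handle, creq.pitch and creq.size describe the
 *	buffer allocated above.
 */
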
/**
 *	psb_gem_fault		-	pagefault handler for GEM objects
 *	@vmf: fault detail
 *
 *	Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 *	does most of the work for us, including the actual map/unmap calls,
 *	but we need to do the page work ourselves.
 *
 *	This code eventually needs to handle faulting objects in and out
 *	of the GTT and repacking the GTT when we run out of space. We can
 *	put that off for now; it is not needed for our simple uses.
 *
 *	The VMA was set up by GEM. In doing so it also ensured that
 *	vma->vm_private_data points to the GEM object that is backing this
 *	mapping.
 */
static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj;
	struct psb_gem_object *pobj;
	int err;
	vm_fault_t ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;
	dev_priv = to_drm_psb_private(dev);

	pobj = to_psb_gem_object(obj);

	/* Make sure we don't get a parallel update on a fault, nor move or
	   remove something from beneath our feet */
	mutex_lock(&dev_priv->mmap_mutex);

	/* For now the mmap pins the object and it stays pinned. As things
	   stand that will do us no harm */
	if (pobj->mmapping == 0) {
		err = psb_gem_pin(pobj);
		if (err < 0) {
			dev_err(dev->dev, "gma500: pin failed: %d\n", err);
			ret = vmf_error(err);
			goto fail;
		}
		pobj->mmapping = 1;
	}

	/* Page relative to the VMA start - we must calculate this ourselves
	   because vmf->pgoff is the fake GEM offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (pobj->stolen)
		pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(pobj->pages[page_offset]);
	ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
	mutex_unlock(&dev_priv->mmap_mutex);

	return ret;
}

/*
 * Memory management
 */

/* Insert vram stolen pages into the GTT. */
static void psb_gem_mm_populate_stolen(struct drm_psb_private *pdev)
{
	struct drm_device *dev = &pdev->dev;
	unsigned int pfn_base;
	unsigned int i, num_pages;
	uint32_t pte;

	pfn_base = pdev->stolen_base >> PAGE_SHIFT;
	num_pages = pdev->vram_stolen_size >> PAGE_SHIFT;

	drm_dbg(dev, "Set up %u stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);

	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, pdev->gtt_map + i);
	}

	(void)ioread32(pdev->gtt_map + i - 1);
}

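/*
 * One-time stolen-memory setup: read the stolen-memory base from the
 * PSB_BSM register in PCI config space, map the range write-combined into
 * the kernel and fill the GTT entries covering it.
 */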
int psb_gem_mm_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long stolen_size, vram_stolen_size;
	struct psb_gtt *pg;
	int ret;

	mutex_init(&dev_priv->mmap_mutex);

	pg = &dev_priv->gtt;

	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
		dev_priv->stolen_base, vram_stolen_size / 1024);

	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto err_mutex_destroy;
	}

	psb_gem_mm_populate_stolen(dev_priv);

	return 0;

err_mutex_destroy:
	mutex_destroy(&dev_priv->mmap_mutex);
	return ret;
}

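/* Tear down the mapping and lock set up by psb_gem_mm_init(). */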
void psb_gem_mm_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	iounmap(dev_priv->vram_addr);

	mutex_destroy(&dev_priv->mmap_mutex);
}

/* Re-insert all pinned GEM objects into GTT. */
static void psb_gem_mm_populate_resources(struct drm_psb_private *pdev)
{
	unsigned int restored = 0, total = 0, size = 0;
	struct resource *r = pdev->gtt_mem->child;
	struct drm_device *dev = &pdev->dev;
	struct psb_gem_object *pobj;

	while (r) {
		/*
		 * TODO: GTT restoration needs a refactoring, so that we don't have to touch
		 *       struct psb_gem_object here. The type represents a GEM object and is
		 *       not related to the GTT itself.
		 */
		pobj = container_of(r, struct psb_gem_object, resource);
		if (pobj->pages) {
			psb_gtt_insert_pages(pdev, &pobj->resource, pobj->pages);
			size += resource_size(&pobj->resource);
			++restored;
		}
		r = r->sibling;
		++total;
	}

	drm_dbg(dev, "Restored %u of %u gtt ranges (%u KB)", restored, total, (size / 1024));
}

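/*
 * Restore the GTT after suspend/resume: re-read the stolen-memory base,
 * verify that the stolen size has not changed, then re-insert the stolen
 * pages and all pinned GEM objects.
 */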
int psb_gem_mm_resume(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long stolen_size, vram_stolen_size;
	struct psb_gtt *pg;

	pg = &dev_priv->gtt;

	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n", dev_priv->stolen_base,
		vram_stolen_size / 1024);

	if (stolen_size != pg->stolen_size) {
		dev_err(dev->dev, "GTT resume error.\n");
		return -EINVAL;
	}

	psb_gem_mm_populate_stolen(dev_priv);
	psb_gem_mm_populate_resources(dev_priv);

	return 0;
}
433