xref: /linux/drivers/gpu/drm/panfrost/panfrost_gem.c (revision 17cfcb68af3bc7d5e8ae08779b1853310a2949f3)
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

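	/*
	 * Heap BOs carry one sg_table per 2MB chunk in bo->sgts, filled
	 * in lazily by the GPU fault handler; unmap and free only the
	 * chunks that were actually populated.
	 */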
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
					     bo->sgts[i].nents, DMA_BIDIRECTIONAL);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kfree(bo->sgts);
	}

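	/*
	 * Take the BO off the shrinker's madvise list (under
	 * shrinker_lock) so the madvise shrinker cannot pick it up while
	 * it is being freed.
	 */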
	mutex_lock(&pfdev->shrinker_lock);
	if (!list_empty(&bo->base.madv_list))
		list_del(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	drm_gem_shmem_free_object(obj);
}

static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
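	/*
	 * For example (illustrative numbers only): a 1MB executable BO
	 * gets align = 1MB >> PAGE_SHIFT (256 with 4K pages), so its GPU
	 * VA ends up 1MB-aligned and the mapping cannot straddle a 16MB
	 * boundary, given the power-of-two, sub-16MB assumption above.
	 * Non-executable BOs of 2MB or more get 2MB alignment, presumably
	 * so the MMU code can use 2MB block mappings when the physical
	 * layout allows.
	 */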
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	bo->mmu = &priv->mmu;
	spin_lock(&priv->mm_lock);
	ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&priv->mm_lock);
	if (ret)
		return ret;

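	/*
	 * Heap BOs have no backing pages yet; they get populated and
	 * mapped on demand from the GPU page fault handler, so only map
	 * non-heap BOs up front.
	 */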
	if (!bo->is_heap) {
		ret = panfrost_mmu_map(bo);
		if (ret) {
			spin_lock(&priv->mm_lock);
			drm_mm_remove_node(&bo->node);
			spin_unlock(&priv->mm_lock);
		}
	}
	return ret;
}

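/*
 * Called when a GEM handle to the BO held by this file is released:
 * tear down the per-file GPU mapping and give the VA range back to the
 * file's allocator.
 */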
static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_file_priv *priv = file_priv->driver_priv;

	if (bo->is_mapped)
		panfrost_mmu_unmap(bo);

	spin_lock(&priv->mm_lock);
	if (drm_mm_node_allocated(&bo->node))
		drm_mm_remove_node(&bo->node);
	spin_unlock(&priv->mm_lock);
}

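/*
 * Heap BOs only gain backing pages as the GPU faults on them, so there
 * is no stable set of pages to pin (e.g. for dma-buf export); refuse
 * to pin them rather than hand out incomplete memory.
 */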
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	if (to_panfrost_bo(obj)->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->base.base.funcs = &panfrost_gem_funcs;

	return &obj->base.base;
}

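/*
 * Create a shmem-backed BO and a userspace handle for it. The main
 * caller is the CREATE_BO ioctl handler (panfrost_ioctl_create_bo() in
 * panfrost_drv.c), which, at this revision, reports the GPU VA picked
 * in panfrost_gem_open() (bo->node.start << PAGE_SHIFT) back to
 * userspace.
 */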
struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *dev, size_t size,
				u32 flags,
				uint32_t *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
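	/*
	 * The growable-heap fault handler works in 2MB chunks (one
	 * sg_table in bo->sgts per chunk), so a 2MB-multiple size avoids
	 * having to special-case a partial final chunk.
	 */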
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the returned handle is the id userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return bo;
}

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
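	/*
	 * Imported buffers come from another driver or allocator and are
	 * never executed by the GPU, so mark them no-exec.
	 */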
	bo->noexec = true;

	return obj;
}