// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */

#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/list.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "drm.h"
#include "uapi.h"

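/*
 * kref release callback: unpin the buffer from the device address space,
 * drop the buffer reference and free the mapping itself.
 */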
static void tegra_drm_mapping_release(struct kref *ref)
{
	struct tegra_drm_mapping *mapping =
		container_of(ref, struct tegra_drm_mapping, ref);

	host1x_bo_unpin(mapping->map);
	host1x_bo_put(mapping->bo);

	kfree(mapping);
}

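/* Drop a reference to a mapping; the last put releases it. */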
void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
{
	kref_put(&mapping->ref, tegra_drm_mapping_release);
}

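/*
 * Tear down a channel context: release its memory context (if any), drop
 * all mappings made through it and return the channel to the host1x core.
 */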
static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
{
	struct tegra_drm_mapping *mapping;
	unsigned long id;

	if (context->memory_context)
		host1x_memory_context_put(context->memory_context);

	xa_for_each(&context->mappings, id, mapping)
		tegra_drm_mapping_put(mapping);

	xa_destroy(&context->mappings);

	host1x_channel_put(context->channel);

	kfree(context);
}

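/*
 * Called when a file is closed: dispose of all channel contexts and
 * syncpoints still held through it.
 */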
void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
{
	struct tegra_drm_context *context;
	struct host1x_syncpt *sp;
	unsigned long id;

	xa_for_each(&file->contexts, id, context)
		tegra_drm_channel_context_close(context);

	xa_for_each(&file->syncpoints, id, sp)
		host1x_syncpt_put(sp);

	xa_destroy(&file->contexts);
	xa_destroy(&file->syncpoints);
}

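/* Find the engine driver that registered the given host1x class. */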
static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
{
	struct tegra_drm_client *client;

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == class)
			return client;

	return NULL;
}

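/*
 * CHANNEL_OPEN ioctl: open a channel to the engine identified by
 * args->host1x_class, optionally allocating a memory context so that the
 * engine's DMA accesses are isolated to this context's mappings.
 */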
int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_channel_open *args = data;
	struct tegra_drm_client *client = NULL;
	struct tegra_drm_context *context;
	int err;

	if (args->flags)
		return -EINVAL;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	client = tegra_drm_find_client(tegra, args->host1x_class);
	if (!client) {
		err = -ENODEV;
		goto free;
	}

	if (client->shared_channel) {
		context->channel = host1x_channel_get(client->shared_channel);
	} else {
		context->channel = host1x_channel_request(&client->base);
		if (!context->channel) {
			err = -EBUSY;
			goto free;
		}
	}

	/* Only allocate context if the engine supports context isolation. */
	if (device_iommu_mapped(client->base.dev) && client->ops->can_use_memory_ctx) {
		bool supported;

		err = client->ops->can_use_memory_ctx(client, &supported);
		if (err)
			goto put_channel;

		if (supported) {
			struct pid *pid = get_task_pid(current, PIDTYPE_TGID);

			context->memory_context = host1x_memory_context_alloc(
				host, client->base.dev, pid);
			put_pid(pid);
		}

		if (IS_ERR(context->memory_context)) {
			if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
				err = PTR_ERR(context->memory_context);
				goto put_channel;
			} else {
				/*
				 * OK, HW does not support contexts or contexts
				 * are disabled.
				 */
				context->memory_context = NULL;
			}
		}
	}

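	/* Publish the context; IDs are allocated from 1 so that 0 stays invalid. */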
	err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto put_memctx;

	context->client = client;
	xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);

	args->version = client->version;
	args->capabilities = 0;

	if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
		args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;

	return 0;

put_memctx:
	if (context->memory_context)
		host1x_memory_context_put(context->memory_context);
put_channel:
	host1x_channel_put(context->channel);
free:
	kfree(context);

	return err;
}

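/*
 * CHANNEL_CLOSE ioctl: remove a context from the file and tear it down.
 */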
int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_close *args = data;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	xa_erase(&fpriv->contexts, args->context);

	mutex_unlock(&fpriv->lock);

	tegra_drm_channel_context_close(context);

	return 0;
}

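/*
 * CHANNEL_MAP ioctl: pin a GEM buffer into the engine's address space and
 * hand back a mapping ID that job submissions can refer to.
 */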
int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_map *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;
	enum dma_data_direction direction;
	struct device *mapping_dev;
	int err = 0;

	if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
		return -EINVAL;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		err = -ENOMEM;
		goto unlock;
	}

	kref_init(&mapping->ref);

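	/*
	 * Map through the context's dedicated memory context device when one
	 * exists; otherwise map directly for the engine itself.
	 */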
	if (context->memory_context)
		mapping_dev = &context->memory_context->dev;
	else
		mapping_dev = context->client->base.dev;

	mapping->bo = tegra_gem_lookup(file, args->handle);
	if (!mapping->bo) {
		err = -EINVAL;
		goto free;
	}

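	/* Translate the uAPI access flags into a DMA mapping direction. */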
	switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
	case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
		direction = DMA_BIDIRECTIONAL;
		break;

	case DRM_TEGRA_CHANNEL_MAP_WRITE:
		direction = DMA_FROM_DEVICE;
		break;

	case DRM_TEGRA_CHANNEL_MAP_READ:
		direction = DMA_TO_DEVICE;
		break;

	default:
		err = -EINVAL;
		goto put_gem;
	}

	mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
	if (IS_ERR(mapping->map)) {
		err = PTR_ERR(mapping->map);
		goto put_gem;
	}

	mapping->iova = mapping->map->phys;
	mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;

	err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto unpin;

	mutex_unlock(&fpriv->lock);

	return 0;

unpin:
	host1x_bo_unpin(mapping->map);
put_gem:
	host1x_bo_put(mapping->bo);
free:
	kfree(mapping);
unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

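/*
 * CHANNEL_UNMAP ioctl: drop a mapping from a context. The buffer is only
 * unpinned once the last reference to the mapping is gone, e.g. when an
 * in-flight job that uses it has finished.
 */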
int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_unmap *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = xa_erase(&context->mappings, args->mapping);

	mutex_unlock(&fpriv->lock);

	if (!mapping)
		return -EINVAL;

	tegra_drm_mapping_put(mapping);

	return 0;
}

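/*
 * SYNCPOINT_ALLOCATE ioctl: allocate a client-managed host1x syncpoint and
 * track it in the per-file XArray, keyed by its hardware ID.
 */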
int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;
	int err;

	if (args->id)
		return -EINVAL;

	sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
	if (!sp)
		return -EBUSY;

	args->id = host1x_syncpt_id(sp);

	err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
	if (err) {
		host1x_syncpt_put(sp);
		return err;
	}

	return 0;
}

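/*
 * SYNCPOINT_FREE ioctl: release a syncpoint previously allocated through
 * this file.
 */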
int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;

	mutex_lock(&fpriv->lock);
	sp = xa_erase(&fpriv->syncpoints, args->id);
	mutex_unlock(&fpriv->lock);

	if (!sp)
		return -EINVAL;

	host1x_syncpt_put(sp);

	return 0;
}

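/*
 * SYNCPOINT_WAIT ioctl: wait until a syncpoint reaches the given threshold
 * or the absolute timeout expires, returning the syncpoint value through
 * args->value.
 */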
int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct drm_tegra_syncpoint_wait *args = data;
	signed long timeout_jiffies;
	struct host1x_syncpt *sp;

	if (args->padding != 0)
		return -EINVAL;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);

	return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
}