xref: /linux/drivers/gpu/drm/msm/msm_gem_submit.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

/*
 * Cmdstream submission:
 */

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000
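
/*
 * Submit-local bo state, kept in the high bits of submit->bos[i].flags
 * (the low bits hold the userspace MSM_SUBMIT_BO_x flags):
 *
 *   BO_VALID:  the iova presumed by userspace matches the actual iova,
 *              so relocs referencing this bo can be skipped
 *   BO_LOCKED: the bo's reservation ww_mutex is currently held
 *   BO_PINNED: the bo holds an iova reference for this gpu
 */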

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
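
/* note: userspace passes pointers as u64 so the ioctl structs have the
 * same layout on 32- and 64-bit kernels; casting through uintptr_t
 * narrows the value safely on 32-bit builds.
 */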

static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, uint32_t nr)
{
	struct msm_gem_submit *submit;
	u64 sz = sizeof(*submit) + ((u64)nr * sizeof(submit->bos[0]));

	/* nr comes from userspace, so compute the allocation size in
	 * 64-bit and bail out if it would overflow what kmalloc() can
	 * represent:
	 */
	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (submit) {
		submit->dev = dev;
		submit->gpu = gpu;

		/* initially, until copy_from_user() and bo lookup succeed: */
		submit->nr_bos = 0;
		submit->nr_cmds = 0;

		INIT_LIST_HEAD(&submit->bo_list);
		ww_acquire_init(&submit->ticket, &reservation_ww_class);
	}

	return submit;
}
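
/* the bos[] table is a variable-length array at the tail of
 * struct msm_gem_submit (see msm_gem.h), hence the
 * sizeof(*submit) + nr * sizeof(bos[0]) allocation above.
 */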

static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;
		void __user *userptr =
			to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* copy_from_user() can fault and sleep, so it must not be
		 * called with the file->table_lock spinlock held; take the
		 * lock (below) only around the actual idr lookup:
		 */
		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			goto out;
		}

		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova  = submit_bo.presumed;

		/* normally use drm_gem_object_lookup(), but hit object_idr
		 * directly so the lookup and the reference happen under a
		 * single table_lock hold:
		 */
		spin_lock(&file->table_lock);
		obj = idr_find(&file->object_idr, submit_bo.handle);
		if (!obj) {
			spin_unlock(&file->table_lock);
			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
			ret = -EINVAL;
			goto out;
		}
		drm_gem_object_reference(obj);
		spin_unlock(&file->table_lock);

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit_bo.handle, i);
			drm_gem_object_unreference(obj);
			ret = -EINVAL;
			goto out;
		}

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out:
	submit->nr_bos = i;

	return ret;
}
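
/* note: on error, submit->nr_bos is set to the failing index, so
 * submit_cleanup() only unwinds the entries that were fully looked
 * up and referenced.
 */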

static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}

/* This is where we make sure all the bos are reserved and pinned: */
static int submit_validate_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint32_t iova;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		/* this would break the logic in the fail path.. there is no
		 * reason for this to happen, but just to be on the safe side
		 * let's notice if this starts happening in the future:
		 */
		WARN_ON(ret == -EDEADLK);

		if (ret)
			goto fail;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
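
/*
 * How the ww_mutex dance above works: all the reservation locks for one
 * submit are taken under a single ww_acquire_ctx (submit->ticket).  If
 * locking bos[i] would deadlock against another submit, the ww_mutex core
 * returns -EDEADLK; we then drop every lock we hold, block on the
 * contended lock with ww_mutex_lock_slow_interruptible(), and retry the
 * whole loop.  slow_locked remembers that the contended bo is already
 * locked on re-entry (so it isn't locked twice) and is cleared once the
 * loop reaches that index.  ww_acquire_done() marks the point after which
 * no further locks will be added to the ticket.
 */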

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the relocs and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t iova, off;
		bool valid;

		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
		if (ret)
			return -EFAULT;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		/* userspace must supply relocs sorted by submit_offset;
		 * out-of-order entries are rejected here:
		 */
		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			return ret;

		/* if the presumed address was correct there is nothing to patch: */
		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

	return 0;
}
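
/*
 * Illustrative example (hypothetical values, not from this file): for a
 * reloc with reloc_offset=0x100, shift=-2 and or=0x1, against a bo whose
 * actual iova is 0x10014000, the dword at submit_offset/4 is patched to:
 *
 *	((0x10014000 + 0x100) >> 2) | 0x1  ==  0x04005040 | 0x1  ==  0x04005041
 */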

static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
	unsigned i;

	/* note: 'fail' is currently unused; cleanup is identical on the
	 * success and error paths:
	 */
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu;
	unsigned i;
	int ret;

	/* for now, we just have the 3d pipe.. eventually this would need to
	 * be more clever to dispatch to the appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	/* priv->gpu stays NULL until the gpu module has loaded successfully,
	 * so bail out rather than dereferencing it below:
	 */
	if (!gpu)
		return -ENXIO;

	if (args->nr_cmds > MAX_CMDS)
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);

	submit = submit_create(dev, gpu, args->nr_bos);
	if (!submit) {
		ret = -ENOMEM;
		goto out;
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_validate_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint32_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}
		/* bounds check written so the u32 addition (size + offset)
		 * cannot wrap around; also reject zero-sized cmdstreams:
		 */
		if (!submit_cmd.size ||
				(submit_cmd.size > msm_obj->base.size) ||
				(submit_cmd.submit_offset >
					msm_obj->base.size - submit_cmd.size)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx  = submit_cmd.submit_idx;

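		/* if all presumed iovas matched, the cmdstream needs no
		 * patching, so reloc processing can be skipped entirely:
		 */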
		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	ret = msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence;

out:
	if (submit)
		submit_cleanup(submit, !!ret);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
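
/*
 * For reference, a minimal userspace sketch of driving this ioctl (not part
 * of this file; field names follow include/uapi/drm/msm_drm.h, and the bo
 * handle plus cmdstream contents are assumed to have been set up already):
 *
 *	struct drm_msm_gem_submit_bo bo = {
 *		.flags    = MSM_SUBMIT_BO_READ,
 *		.handle   = handle,        // e.g. from DRM_IOCTL_MSM_GEM_NEW
 *		.presumed = last_iova,     // 0 if unknown; kernel fixes it up
 *	};
 *	struct drm_msm_gem_submit_cmd cmd = {
 *		.type          = MSM_SUBMIT_CMD_BUF,
 *		.submit_idx    = 0,        // index into the bos table
 *		.submit_offset = 0,
 *		.size          = cmdstream_bytes,
 *		.nr_relocs     = 0,        // presumed address trusted here
 *		.relocs        = 0,
 *	};
 *	struct drm_msm_gem_submit req = {
 *		.pipe    = MSM_PIPE_3D0,
 *		.nr_bos  = 1,
 *		.bos     = (uint64_t)(uintptr_t)&bo,
 *		.nr_cmds = 1,
 *		.cmds    = (uint64_t)(uintptr_t)&cmd,
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req)) == 0)
 *		printf("submitted, fence %u\n", req.fence);
 */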