/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <drm/drm_print.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
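
/*
 * The tables below are indexed by release type, matching the cur_idx
 * mapping in qxl_alloc_release_reserved(): 0 = QXL_RELEASE_DRAWABLE,
 * 1 = QXL_RELEASE_SURFACE_CMD, 2 = QXL_RELEASE_CURSOR_CMD.  Assuming
 * 4 KiB pages, each release BO holds 16 drawable/cursor slots of 256
 * bytes, or 32 surface-cmd slots of 128 bytes.
 */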
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

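/*
 * Custom fence wait: qxl_io_notify_oom() is evaluated as part of the
 * wait condition, so each wakeup prods the device to process its
 * releases, and the task sleeps on release_event until the fence
 * signals.  Returns the number of jiffies remaining, or 0 on timeout.
 */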
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device,
			    release_lock);

	if (!wait_event_timeout(qdev->release_event,
				(dma_fence_is_signaled(fence) ||
				 (qxl_io_notify_oom(qdev), 0)),
				timeout))
		return 0;

	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

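/*
 * Allocate a qxl_release and an idr handle (>= 1) for it.  The handle
 * becomes release->id, which is what the device later hands back on
 * the release ring; the fence seqno is taken under the same idr lock
 * so ids and seqnos stay in step.
 */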
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, list);
		qxl_bo_unref(&entry->bo);
		list_del(&entry->list);
		kfree(entry);
	}
	release->release_bo = NULL;
}

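/*
 * Free a release.  If the embedded fence was initialized (by
 * qxl_release_fence_buffer_objects()), signal it and drop the last
 * reference so the fence core frees the containing structure;
 * otherwise no fence exists and the release can simply be kfree()d.
 */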
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
	atomic_dec(&qdev->release_count);
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release BOs; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, list) {
		if (entry->bo == bo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->bo = bo;
	list_add_tail(&entry->list, &release->bos);
	return 0;
}

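/*
 * Validate a BO for command submission: place it in its preferred
 * domain if it is not pinned, reserve a fence slot in its reservation
 * object, and make sure a surface id has been allocated for it.
 */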
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->tbo.pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}

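/*
 * Lock every BO attached to the release with drm_exec, retrying on
 * contention, then validate each one.  A singular list means only the
 * release BO itself is attached; it is already pinned, so nothing
 * needs to be reserved.
 */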
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to
	   reserve */
	if (list_is_singular(&release->bos))
		return 0;

	drm_exec_init(&release->exec, no_intr ? 0 :
		      DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&release->exec) {
		list_for_each_entry(entry, &release->bos, list) {
			ret = drm_exec_prepare_obj(&release->exec,
						   &entry->bo->tbo.base,
						   1);
			drm_exec_retry_on_contention(&release->exec);
			if (ret)
				goto error;
		}
	}

	list_for_each_entry(entry, &release->bos, list) {
		ret = qxl_release_validate_bo(entry->bo);
		if (ret)
			goto error;
	}
	return 0;
error:
	drm_exec_fini(&release->exec);
	return ret;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to
	   reserve */
	if (list_is_singular(&release->bos))
		return;

	drm_exec_fini(&release->exec);
}

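/*
 * Surface destroy commands reuse the release BO of the matching
 * create command: the destroy info is stashed 64 bytes past the
 * create info, i.e. in the second half of the create command's
 * 128-byte SURFACE_RELEASE_SIZE slot.
 */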
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					 QXL_RELEASE_SURFACE_CMD, release, NULL);
}

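/*
 * Suballocate a release from the per-type current BO, rolling over to
 * a freshly pinned page when the current one is full.  The slot
 * offset is simply slot * release_size_per_bo[cur_idx]; e.g. the
 * fourth drawable release in a BO lands at offset 3 * 256 = 768.  An
 * exhausted BO is unpinned and unreferenced outside release_mutex.
 */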
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
				       int type, struct qxl_release **release,
				       struct qxl_bo **rbo)
{
	struct qxl_bo *bo, *free_bo = NULL;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}
	atomic_inc(&qdev->release_count);

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		free_bo = qdev->current_release_bo[cur_idx];
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			if (free_bo) {
				qxl_bo_unpin(free_bo);
				qxl_bo_unref(&free_bo);
			}
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);
	if (free_bo) {
		qxl_bo_unpin(free_bo);
		qxl_bo_unref(&free_bo);
	}

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
						   uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

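/*
 * Map the page holding this release's info and return a pointer to
 * the info itself.  release_offset & PAGE_MASK selects the page to
 * map (always page 0 for these single-page BOs) and
 * release_offset & ~PAGE_MASK is the byte offset within it: assuming
 * 4 KiB pages, a release_offset of 0x340 maps page 0 and points 0x340
 * bytes into it.
 */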
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

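/*
 * Initialize the release's embedded fence under a synthetic context
 * (no real fence context is ever allocated, hence the high bits in
 * release->id | 0xf0000000), attach it to every BO's reservation as a
 * read fence, move the BOs to the LRU tail, and drop the drm_exec
 * locks taken in qxl_release_reserve_list().
 */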
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_device *bdev;
	struct qxl_bo_list *entry;
	struct qxl_device *qdev;
	struct qxl_bo *bo;

	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to
	   reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
	bdev = bo->tbo.bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	list_for_each_entry(entry, &release->bos, list) {
		bo = entry->bo;

		dma_resv_add_fence(bo->tbo.base.resv, &release->base,
				   DMA_RESV_USAGE_READ);
		ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
	}
	drm_exec_fini(&release->exec);
}
445